Python keras.layers.core module: Flatten() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.core.Flatten().

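Before the per-project examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming a Keras 2-style API) of where Flatten() typically sits: between the convolutional feature extractor and the fully connected head, collapsing every non-batch dimension into a single vector.

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense
from keras.layers.core import Flatten

model = Sequential()
# (32, 32, 3) input -> (30, 30, 32) feature map after the 3x3 convolution
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
# -> (15, 15, 32) after 2x2 max pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten turns (15, 15, 32) into a single vector of 15 * 15 * 32 = 7200 values
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.summary()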
Project: snake_game    Author: wing3s
def main():
    game_width = 12
    game_height = 9
    nb_frames = 4
    actions = ((-1, 0), (1, 0), (0, -1), (0, 1), (0, 0))

    # Recipe of deep reinforcement learning model
    model = Sequential()
    model.add(Convolution2D(
        16,
        nb_row=3,
        nb_col=3,
        activation='relu',
        input_shape=(nb_frames, game_height, game_width)))
    model.add(Convolution2D(32, nb_row=3, nb_col=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(len(actions)))
    model.compile(RMSprop(), 'MSE')

    agent = Agent(
        model, nb_frames, snake_game, actions, size=(game_width, game_height))
    agent.train(nb_epochs=10000, batch_size=64, gamma=0.8, save_model=True)
    agent.play(nb_rounds=10)
Project: taxi    Author: xuguanggen
def build_mlp(n_con,n_emb,vocabs_size,n_dis,emb_size,cluster_size):
    hidden_size = 800
    con = Sequential()
    con.add(Dense(input_dim=n_con,output_dim=emb_size))

    emb_list = []
    for i in range(n_emb):
        emb = Sequential()
        emb.add(Embedding(input_dim=vocabs_size[i],output_dim=emb_size,input_length=n_dis))
        emb.add(Flatten())
        emb_list.append(emb)

    model = Sequential()
    model.add(Merge([con] + emb_list,mode='concat'))
    model.add(BatchNormalization())
    model.add(Dense(hidden_size,activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(cluster_size,activation='softmax'))
    model.add(Lambda(caluate_point, output_shape =[2]))
    return model
Project: kerlym    Author: osh
def simple_cnn(agent, env, dropout=0, learning_rate=1e-3, **args):
  with tf.device("/cpu:0"):
    state = tf.placeholder('float', [None, agent.input_dim])
    S = Input(shape=[agent.input_dim])
    h = Reshape( agent.input_dim_orig )(S)
    h = TimeDistributed( Convolution2D(16, 8, 8, subsample=(4, 4), border_mode='same', activation='relu', dim_ordering='tf'))(h)
#    h = Dropout(dropout)(h)
    h = TimeDistributed( Convolution2D(32, 4, 4, subsample=(2, 2), border_mode='same', activation='relu', dim_ordering='tf'))(h)
    h = Flatten()(h)
#    h = Dropout(dropout)(h)
    h = Dense(256, activation='relu')(h)
#    h = Dropout(dropout)(h)
    h = Dense(128, activation='relu')(h)
    V = Dense(env.action_space.n, activation='linear',init='zero')(h)
    model = Model(S, V)
    model.compile(loss='mse', optimizer=RMSprop(lr=learning_rate) )
    return state, model
Project: dcgan    Author: kyloon
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(64,5,5,
                            border_mode='same',
                            input_shape=(1,28,28),
                            dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Convolution2D(128,5,5, border_mode='same', dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
                        64, 5, 5,
                        border_mode='same',
                        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: Deep-Learning-with-Keras    Author: PacktPublishing
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same",
            input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # a softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

# network and training
Project: deep_learning_ex    Author: zatonovo
def init_model():
    """
    """
    start_time = time.time()
    print 'Compiling model...'
    model = Sequential()

    model.add(Convolution2D(64, 3,3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(.25))

    model.add(Flatten())

    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print 'Model compiled in {0} seconds'.format(time.time() - start_time)

    model.summary()
    return model
Project: keras-molecules    Author: maxhodak
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
Project: policy_net_go    Author: gurgehx
def get_simple_model():
    model = Sequential()
    model.add(ZeroPadding2D(padding=(3, 3), input_shape=(nb_input_layers, NB_ROWS, NB_COLS)))
    model.add(Convolution2D(96, 5, 5))
    model.add(Activation('relu'))

    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(Convolution2D(192, 3, 3))
    model.add(Activation('relu'))

    model.add(Flatten())

    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    print("Compiling model")
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print("Compiled model")

    return model

###############################################################################
Project: GlottGAN    Author: bajibabu
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")

    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 400
    x = AveragePooling1D(pool_size=20)(x) # output shape is 100 x 20

    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 250 x 20
    x = AveragePooling1D(pool_size=5)(x) # output shape is 250 x 4

    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x) # output shape is 300 x 4
    x = Flatten()(x) # output shape is 1200

    x = concatenate([x, aux_input], axis=-1) # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x) # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    discriminator_model = Model(
        outputs=[x], inputs=[disc_input, aux_input], name=model_name)

    return discriminator_model
Project: MixtureOfExperts    Author: krishnakalyan3
def lenet5(self):
        model = Sequential()
        model.add(Conv2D(64, (5, 5,), name='conv1',
                         padding='same',
                                activation='relu',
                                input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        # Local Normalization
        model.add(Conv2D(64, (5, 5,), padding='same', activation='relu', name='conv2'))
        # Local Normalization
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))

        model.add(Flatten())
        model.add(Dense(128, activation='relu', name='dense1'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3
def simple_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))

        model.add(Flatten())
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3
def cuda_cnn(self):
        model = Sequential()
        model.add(Conv2D(32, (5, 5),
                         border_mode='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(contrast normalization)
        model.add(Conv2D(32, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        # model.add(contrast normalization)
        model.add(Conv2D(64, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        model.add(Flatten())
        model.add(Dense(16, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts    Author: krishnakalyan3
def small_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense2'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: Theano-MPI    Author: uoguelph-mlrg
def build_model(self):


        img_input = Input(shape=(img_channels, img_rows, img_cols))

        # one conv at the beginning (spatial size: 32x32)
        x = ZeroPadding2D((1, 1))(img_input)
        x = Convolution2D(16, nb_row=3, nb_col=3)(x)

        # Stage 1 (spatial size: 32x32)
        x = bottleneck(x, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
        # Stage 2 (spatial size: 16x16)
        x = bottleneck(x, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
        # Stage 3 (spatial size: 8x8)
        x = bottleneck(x, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))

        x = BatchNormalization(mode=0, axis=1)(x)
        x = Activation('relu')(x)
        x = AveragePooling2D((8, 8), strides=(1, 1))(x)
        x = Flatten()(x)
        preds = Dense(nb_classes, activation='softmax')(x)

        self.model = Model(input=img_input, output=preds)

        self.keras_get_params()
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def VGG_16_KERAS(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model

    base_model = VGG16(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16


# MIN: 1.00 Fast: 60 sec
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def VGG_16_2_v2(classes_number, optim_name='Adam', learning_rate=-1):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.vgg16 import VGG16
    from keras.models import Model
    from keras.layers import Input

    input_tensor = Input(shape=(3, 224, 224))
    base_model = VGG16(input_tensor=input_tensor, include_top=False, weights='imagenet')
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(classes_number, activation='softmax', name='predictions')(x)
    vgg16 = Model(input=base_model.input, output=x)

    optim = get_optim('VGG16_KERAS', optim_name, learning_rate)
    vgg16.compile(optimizer=optim, loss='categorical_crossentropy', metrics=['accuracy'])
    # print(vgg16.summary())
    return vgg16
Project: tf-wgan    Author: kuleshov
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(Xk_d)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: tf-wgan    Author: kuleshov
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(Xk_d)
  # x = BatchNormalization(mode=2, axis=1)(x) # <- makes things much worse!
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024, init=conv2D_init)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: DeepRL-FlappyBird    Author: hashbangCoder
def model_default(input_shape):
    model = Sequential()
    model.add(Convolution2D(32,8,8,subsample=(4,4), border_mode='same',init='he_uniform',input_shape=input_shape))

    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,4,4, subsample=(2,2),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,3,3, subsample=(1,1),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dense(2, init='he_uniform'))

    return model


# Model WITH BATCHNORM NO MAXPOOL NO Dropout
Project: data-science-bowl-2017    Author: tondonia
def create_model_2():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: data-science-bowl-2017    Author: tondonia
def create_model_1():
    inputs = Input((32, 32, 32, 1))

    #noise = GaussianNoise(sigma=0.1)(x)

    conv1 = Convolution3D(32, 3, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = SpatialDropout3D(0.1)(conv1)
    conv1 = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2,2, 2))(conv1)

    x = Flatten()(pool1)
    x = Dense(64, init='normal')(x)
    predictions = Dense(1, init='normal', activation='sigmoid')(x)

    model = Model(input=inputs, output=predictions)
    model.summary()
    optimizer = Adam(lr=1e-5)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy','precision','recall','mean_squared_error','accuracy'])

    return model
Project: eva-didi    Author: eljefec
def build_model(dropout):
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = INPUT_SHAPE))
    model.add(Conv2D(3, (1, 1), activation='relu'))
    model.add(Conv2D(12, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(24, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Conv2D(48, (3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dropout(dropout))
    model.add(Dense(64, activation = 'relu'))
    model.add(Dropout(dropout))
    model.add(Dense(32, activation = 'relu'))
    model.add(Dropout(dropout))
    model.add(Dense(1))

    return model
Project: eva-didi    Author: eljefec
def build_model(dropout_rate = 0.2):
    input_image = Input(shape = IMAGE_SHAPE,
                        dtype = 'float32',
                        name = INPUT_IMAGE)
    x = MaxPooling2D()(input_image)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Conv2D(32, kernel_size=3, strides=(2,2))(x)
    x = MaxPooling2D()(x)
    x = Dropout(dropout_rate)(x)
    image_out = Flatten()(x)
    # image_out = Dense(32, activation='relu')(conv)

    input_lidar_panorama = Input(shape = PANORAMA_SHAPE,
                                 dtype = 'float32',
                                 name = INPUT_LIDAR_PANORAMA)
    x = pool_and_conv(input_lidar_panorama)
    x = pool_and_conv(x)
    x = Dropout(dropout_rate)(x)
    panorama_out = Flatten()(x)

    input_lidar_slices = Input(shape = SLICES_SHAPE,
                               dtype = 'float32',
                               name = INPUT_LIDAR_SLICES)
    x = MaxPooling3D(pool_size=(2,2,1))(input_lidar_slices)
    x = Conv3D(32, kernel_size=3, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    x = Conv3D(32, kernel_size=2, strides=(2,2,1))(x)
    x = MaxPooling3D(pool_size=(2,2,1))(x)
    x = Dropout(dropout_rate)(x)
    slices_out = Flatten()(x)

    x = keras.layers.concatenate([image_out, panorama_out, slices_out])

    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(32, activation='relu')(x)

    pose_output = Dense(9, name=OUTPUT_POSE)(x)

    model = Model(inputs=[input_image, input_lidar_panorama, input_lidar_slices],
                  outputs=[pose_output])

    # Fix error with TF and Keras
    import tensorflow as tf
    tf.python.control_flow_ops = tf

    model.compile(loss='mean_squared_error', optimizer='adam')

    return model
Project: keras-recommendation    Author: sonyisme
def test_img_clf(self):
        print('image classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32), 
            classification=True, nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Convolution2D(32, 3, 32, 32))
        model.add(Activation('relu'))
        model.add(Flatten())
        model.add(Dense(32, y_test.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
        self.assertTrue(history.validation_accuracy[-1] > 0.9)
Project: HSICNN    Author: jamesbing
def Net_model(lr=0.005,decay=1e-6,momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(nb_filters1, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters2, nb_conv, nb_conv))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1000)) #Full connection
    model.add(Activation('tanh'))
    #model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model
Project: GAKeras    Author: PetraVidnerova
def xtest_net(self):

        input_shape = (28,28,1)

        model = Sequential()
        model.add(MaxPooling2D(pool_size=(3,3), input_shape = input_shape))
        print("----->", model.layers[-1].output_shape)
        model.add(MaxPooling2D(pool_size=(3,3)))
        print("----->", model.layers[-1].output_shape)
        model.add(MaxPooling2D(pool_size=(3,3)))
        print("----->", model.layers[-1].output_shape)

        if model.layers[-1].output_shape[1] >= 2 and model.layers[-1].output_shape[2] >= 2:
            model.add(MaxPooling2D(pool_size=(2,2)))
            print("----->", model.layers[-1].output_shape)
        model.add(Flatten())

        #model.add(Convolution2D(20, 5, 5, border_mode='same'))
        #model.add(MaxPooling2D(pool_size=(2,2)))
        #model.add(MaxPooling2D(pool_size=(2,2)))
        #model.add(MaxPooling2D(pool_size=(2,2)))
        #model.add(Flatten())

        model.summary()
Project: rna_protein_binding    Author: wentaozhu
def custom_objective(y_true, y_pred):
    #prediction = Flatten(name='flatten')(dense_3)
    #prediction = ReRank(k=k, label=1, name='output')(prediction)
    #prediction = SoftReRank(softmink=softmink, softmaxk=softmaxk, label=1, name='output')(prediction)
    '''Just another crossentropy'''
    #y_true = K.clip(y_true, _EPSILON, 1.0-_EPSILON)
    y_true = K.max(y_true)
    #y_armax_index = numpy.argmax(y_pred)
    y_new = K.max(y_pred)
    #y_new = max(y_pred)
    '''
    if y_new >= 0.5:
        y_new_label = 1
    else:
        y_new_label = 0
    cce = abs(y_true - y_new_label)
    '''
    logEps=1e-8
    cce = - (y_true * K.log(y_new+logEps) + (1 - y_true)* K.log(1-y_new + logEps))
    return cce
Project: rna_protein_binding    Author: wentaozhu
def set_cnn_model(ninstance=4, input_dim = 4, input_length = 107):
    nbfilter = 16
    model = Sequential() # #seqs * seqlen * 4
    #model.add(brnn)
    model.add(Conv2D(input_shape=(ninstance, input_length, input_dim),
                            filters=nbfilter,
                            kernel_size=(1,10),
                            padding="valid",
                            #activation="relu",
                            strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1,3))) # 32 16
    # model.add(Dropout(0.25)) # will be better
    model.add(Conv2D(filters=nbfilter*2, kernel_size=(1,32), padding='valid', activation='relu', strides=1))
    # model.add(Flatten())
    #model.add(Softmax4D(axis=1))

    #model.add(MaxPooling1D(pool_length=3))
    #model.add(Flatten())
    #model.add(Recalc(axis=1))
    # model.add(Flatten())
    # model.add(Dense(nbfilter*2, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=1, kernel_size=(1,1), padding='valid', activation='sigmoid', strides=1))
    return model
Project: keras-dcgan    Author: jacobgil
def discriminator_model():
    model = Sequential()
    model.add(
            Conv2D(64, (5, 5),
            padding='same',
            input_shape=(28, 28, 1))
            )
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: exit-signs    Author: daniel-j-h
def getCNN():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=kNetImageShape))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(kNetNumClasses, activation='softmax'))

    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    return model
Project: RankFace    Author: Entropy-xcy
def make_network():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(128, 128, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    # model.add(Activation('tanh'))

    return model
Project: keras-convautoencoder    Author: nanopony
def get_encoder(c, c2, d, mp, mp2):
    encoder = models.Sequential()
    # ====================================================
    encoder.add(c)
    encoder.add(Activation('tanh'))
    encoder.add(mp)
    # ====================================================
    encoder.add(Dropout(0.25))
    # ====================================================
    encoder.add(c2)
    encoder.add(Activation('tanh'))
    encoder.add(mp2)
    # ====================================================
    # model.add(c3)
    # model.add(Activation('tanh'))
    # model.add(mp3)
    # ====================================================
    encoder.add(Dropout(0.25))
    # ====================================================
    encoder.add(Flatten())
    encoder.add(d)
    encoder.add(Activation('tanh'))
    return encoder
Project: keras-convautoencoder    Author: nanopony
def build_model(nb_filters=32, nb_pool=2, nb_conv=3):
    model = models.Sequential()
    d = Dense(30)
    c = Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(1, 28, 28))
    mp =MaxPooling2D(pool_size=(nb_pool, nb_pool))
    # =========      ENCODER     ========================
    model.add(c)
    model.add(Activation('tanh'))
    model.add(mp)
    model.add(Dropout(0.25))
    # =========      BOTTLENECK     ======================
    model.add(Flatten())
    model.add(d)
    model.add(Activation('tanh'))
    # =========      BOTTLENECK^-1   =====================
    model.add(DependentDense(nb_filters * 14 * 14, d))
    model.add(Activation('tanh'))
    model.add(Reshape((nb_filters, 14, 14)))
    # =========      DECODER     =========================
    model.add(DePool2D(mp, size=(nb_pool, nb_pool)))
    model.add(Deconvolution2D(c, border_mode='same'))
    model.add(Activation('tanh'))

    return model
Project: qtim_ROP    Author: QTIM-Lab
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
                        64, 5, 5,
                        border_mode='same',
                        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: Nerve-Segmentation    Author: matthewzhou
def create_model(img_rows, img_cols):
    model = Sequential() #initialize model
    model.add(Convolution2D(4, 3, 3, border_mode='same', activation='relu', init='he_normal',
                            input_shape=(1, img_rows, img_cols)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(8, 3, 3, border_mode='same', activation='relu', init='he_normal'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('softmax'))
    adm = Adamax()
    #sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=adm, loss='categorical_crossentropy')
    return model
Project: Flower_Recognition_CNN    Author: Labyrinth108
def model_config(size):
    model = Sequential()

    model.add(Conv2D(32, (5, 5), padding='valid', input_shape=(size, size, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(Activation('tanh'))

    # Softmax output layer
    model.add(Dense(label_size, kernel_initializer='he_normal', bias_initializer='zeros'))
    model.add(Activation('softmax'))

    return model
Project: deeplearning_keras    Author: gazzola
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
                        64, 5, 5,
                        border_mode='same',
                        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: deeplearning_keras    Author: gazzola
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same",
            input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # a softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

# network and training
Project: video-action-recognition    Author: ap916
def compileFeaturesModel(nb_classes,requiredLines):
    f_model = Sequential()
    f_model.add(Dense(512, input_shape=(167,requiredLines)))
    f_model.add(Flatten())
    f_model.add(Dense(nb_classes, W_regularizer=l2(0.1)))
    f_model.add(Activation('linear'))
    f_model.add(Activation('softmax'))


    f_model.load_weights('features_stream_model.h5')

    print 'Compiling f_model...'
    gc.collect()
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True,clipnorm=0.1)
    f_model.compile(loss='hinge',optimizer=sgd, metrics=['accuracy'])
    return f_model
Project: DL8803    Author: NanditaDamaraju
def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    print "convolution"
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))

    model.add(Flatten())
    print"FLATTEN"
    model.add(Dense(400, activation='relu'))
    model.add(Dropout(0.5))
    print"YO"
    model.add(Dense(10, activation='softmax'))

    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    # model.add(Activation('sigmoid'))  # LSGANs
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution1D(
        12, 5,
        border_mode='same',
        input_shape=(INPUT_LN, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[0]))

    model.add(Convolution1D(12, 5, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_length=N_GEN_l[1]))

    #model.add(Reshape((128*7,)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Project: jamespy_py3    Author: jskDr
def discriminator_model(self):
        model = Sequential()
        model.add(Convolution2D(
            8, 10, 10,
            border_mode='same',
            input_shape=(1, 144, 144)))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Convolution2D(16, 10, 10))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Flatten())
        model.add(Dense(128))
        model.add(Activation('tanh'))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        return model
Project: jamespy_py3    Author: jskDr
def discriminator_model(self):
        model = Sequential()
        model.add(Convolution2D(
            8, 10, 10,
            border_mode='same',
            input_shape=(1, 144, 144)))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Convolution2D(16, 10, 10))
        model.add(Activation('tanh'))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Flatten())
        model.add(Dense(128))
        model.add(Activation('tanh'))
        model.add(Dense(1))
        model.add(Activation('sigmoid'))
        return model
Project: jamespy_py3    Author: jskDr
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
        64, 5, 5,
        border_mode='same',
        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model