Python keras.layers.normalization module: BatchNormalization() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use keras.layers.normalization.BatchNormalization().

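Before the project examples, here is a minimal, self-contained sketch of the most common pattern (convolution, then batch normalization, then activation). It assumes Keras 2.x, where keras.layers.normalization still exposes BatchNormalization; the layer sizes are illustrative:

from keras.layers import Input, Conv2D, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model

inp = Input(shape=(32, 32, 3))                                # channels_last input
x = Conv2D(16, (3, 3), padding='same', use_bias=False)(inp)   # bias is redundant before BN
x = BatchNormalization(axis=-1)(x)                            # normalize over the channel axis
x = Activation('relu')(x)
model = Model(inputs=inp, outputs=x)
model.summary()
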
Project: keras-contrib | Author: farizrahman4u
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a "seed", and outputs images
    of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure its output
    # also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
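A quick smoke test for the generator above (a hypothetical snippet, assuming the Keras imports from this example are in scope and the backend uses channels_last):

import numpy as np

gen = make_generator()
noise = np.random.normal(size=(10, 100)).astype('float32')
images = gen.predict(noise)
print(images.shape)  # (10, 28, 28, 1): two stride-2 Conv2DTranspose layers upsample 7 -> 28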
Project: deeppavlov | Author: deepmipt
def cnn_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        outputs = []
        for i in range(len(self.kernel_sizes)):
            output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                              kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        output = concatenate(outputs, axis=1)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
Project: latplan | Author: guicho271828
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(self.parameters['noise']),
                BN(),
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                flatten,
                Sequential([
                    Dense(self.parameters['layer'], activation=self.parameters['activation'], use_bias=False),
                    BN(),
                    Dropout(self.parameters['dropout']),
                    Dense(self.parameters['N']*self.parameters['M']),
                ])]
Project: latplan | Author: guicho271828
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(0.1),
                BN(),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                flatten,]
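Both build_encoder variants return a plain Python list of layers rather than a Model; latplan composes such lists internally (flatten here is a project-local callable, not a Keras layer). A minimal sketch of how a layer list like this could be applied to a tensor:

def apply_layers(x, layers):
    # Thread a tensor through a list of layers/callables in order.
    for layer in layers:
        x = layer(x)
    return x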
Project: GlottGAN | Author: bajibabu
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x) 
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x) # shape is 100 x 10

    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 250
    x = UpSampling1D(size=2)(x) # output shape is 200 x 250

    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 200 x 100
    x = UpSampling1D(size=2)(x) # output shape is 400 x 100

    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x) # final output shape is 400 x 1

    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)

    return generator_model
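A hypothetical smoke test (assumes numpy alongside the Keras imports used above):

import numpy as np

g = generator_model(noise_dim=100, aux_dim=47)
noise = np.random.normal(size=(8, 100)).astype('float32')
aux = np.random.normal(size=(8, 47)).astype('float32')
print(g.predict([noise, aux]).shape)  # (8, 400, 1): two UpSampling1D stages stretch 100 -> 400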
Project: nli_generation | Author: jstarc
def attention_bnorm_model(hidden_size, glove):

    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, None)(hypo_input)
    premise_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(prem_embeddings)
    premise_bn = BatchNormalization()(premise_layer)
    hypo_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(hypo_embeddings)
    hypo_bn = BatchNormalization()(hypo_layer)
    attention = LstmAttentionLayer(output_dim=hidden_size)([hypo_bn, premise_bn])
    att_bn = BatchNormalization()(attention)
    final_dense = Dense(3, activation='softmax')(att_bn)

    model = Model(input=[prem_input, hypo_input], output=final_dense)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Project: shenlan | Author: vector-1127
def discriminator_model():
    """ return a (b, 1) logits"""
    model = Sequential()
    model.add(Convolution2D(64, 4, 4,border_mode='same',input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(512, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(Convolution2D(1, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))

    model.add(Activation('sigmoid'))
    return model
Project: lsun_2017 | Author: ternaus
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
Project: DeepWorks | Author: daigo0927
def build_simpleCNN(input_shape = (32, 32, 3), num_output = 10):

    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'

    images = Input(shape = (h, w, nch))
    x = Conv2D(64, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(images)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Conv2D(128, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer = init,
                    activation = 'softmax')(x)

    model = Model(inputs = images, outputs = outputs)
    return model
Project: iterative_inference_segm | Author: adri-romsor
def _bn_relu_conv(nb_filter, nb_row, nb_col, subsample=False, upsample=False,
                  batch_norm=True, weight_decay=None):

    def f(input):
        processed = input
        if batch_norm:
            processed = BatchNormalization(mode=0, axis=1)(processed)
        processed = Activation('relu')(processed)
        stride = (1, 1)
        if subsample:
            stride = (2, 2)
        if upsample:
            processed = UpSampling2D(size=(2, 2))(processed)
        return Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col,
                             subsample=stride, init='he_normal',
                             border_mode='same',
                             W_regularizer=_l2(weight_decay))(processed)

    return f
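Usage sketch for _bn_relu_conv (hypothetical; this project uses the Keras 1 API, and axis=1 implies a channels_first input):

from keras.layers import Input

x = Input(shape=(3, 32, 32))
y = _bn_relu_conv(nb_filter=64, nb_row=3, nb_col=3, subsample=True)(x)  # BN -> ReLU -> strided 3x3 conv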


# Adds a shortcut between input and residual block and merges them with 'sum'
Project: deeppavlov | Author: deepmipt
def lstm_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                      kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                      dropout=self.opt['dropout_rate']))(embed_input)

        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
Project: keras-image-captioning | Author: danieljl
def _build_sequence_model(self, sequence_input):
        RNN = GRU if self._rnn_type == 'gru' else LSTM

        def rnn():
            rnn = RNN(units=self._rnn_output_size,
                      return_sequences=True,
                      dropout=self._dropout_rate,
                      recurrent_dropout=self._dropout_rate,
                      kernel_regularizer=self._regularizer,
                      kernel_initializer=self._initializer,
                      implementation=2)
            rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
            return rnn

        input_ = sequence_input
        for _ in range(self._rnn_layers):
            input_ = BatchNormalization(axis=-1)(input_)
            rnn_out = rnn()(input_)
            input_ = rnn_out
        time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)

        return time_dist_dense
Project: latplan | Author: guicho271828
def build_encoder(self,input_shape):
        return [GaussianNoise(self.parameters['noise']),
                BN(),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['N']*self.parameters['M']),]
Project: latplan | Author: guicho271828
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
Project: enet-keras | Author: PavlosMelissinos
def build(inp, dropout_rate=0.01):
    enet = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    enet = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet
Project: enet-keras | Author: PavlosMelissinos
def call(self, inputs, **kwargs):
        outputs = K.conv2d(
            inputs,
            self.kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
        outputs = BatchNormalization(momentum=self.momentum)(outputs)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Project: enet-keras | Author: PavlosMelissinos
def build(inp, dropout_rate=0.01):
    pooling_indices = []
    enet, indices_single = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    pooling_indices.append(indices_single)
    enet, indices_single = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    pooling_indices.append(indices_single)
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet, indices_single = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    pooling_indices.append(indices_single)
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet, pooling_indices
Project: keras | Author: GeekLiB
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras | Author: GeekLiB
def test_shared_batchnorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchNormalization(input_shape=(10,), mode=0)
    x1 = Input(shape=(10,))
    bn(x1)

    x2 = Input(shape=(10,))
    y2 = bn(x2)

    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    assert len(model.updates) == 2
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)

    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    assert len(model.updates) == 2
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
Project: keras-contrib | Author: farizrahman4u
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single value, representing whether
    the input is real or generated. Unlike normal GANs, the output is not sigmoid and does not represent a probability!
    Instead, the output should be as large and negative as possible for generated inputs and as large and positive
    as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
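A hypothetical sanity check of the critic's unbounded output (assumes channels_last; note the absence of BatchNormalization, per the WGAN-GP recommendation in the docstring):

import numpy as np

critic = make_discriminator()
batch = np.random.normal(size=(4, 28, 28, 1)).astype('float32')
scores = critic.predict(batch)  # raw critic scores, not probabilities
print(scores.shape)             # (4, 1)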
Project: keras-contrib | Author: farizrahman4u
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if initial:
        if K.image_dim_ordering() == 'th':
            init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)
        else:
            init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    m = add([init, x])
    return m
Project: keras-inception-resnetV2 | Author: kentsommer
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), 
              bias=False, activ_fn='relu', normalize=True):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    if not normalize:
        bias = True
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      border_mode=border_mode,
                      bias=bias)(x)
    if normalize:
        x = BatchNormalization(axis=channel_axis)(x)
    if activ_fn:
        x = Activation(activ_fn)(x)
    return x
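Hypothetical usage of the helper above (Keras 1 API; assumes a channels_first / "th" backend):

from keras.layers import Input

inp = Input(shape=(3, 299, 299))
x = conv2d_bn(inp, 32, 3, 3, subsample=(2, 2))   # conv -> BN -> ReLU
x = conv2d_bn(x, 32, 3, 3, normalize=False)      # conv (with bias) -> ReLU, BN skipped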
Project: copper_price_forecast | Author: liyinwei
def build_model():
    """
    Build the model.
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Project: dogsVScats | Author: prajwalkr
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), bias=False):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      border_mode=border_mode,
                      bias=bias)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
Project: keras_detect_tool_wear | Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
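Note a constraint shared by all the residual blocks from this project: add() requires both branches to have identical shapes, so k2 must equal the channel count of tensor_input (max pooling preserves channels), and any stride applied on the convolutional branch must be mirrored on the pooling branch.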
Project: keras_detect_tool_wear | Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=2,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear | Author: kidozh
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,2,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: MixtureOfExperts | Author: krishnakalyan3
def small_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense2'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: MixtureOfExperts | Author: krishnakalyan3
def small_nn_soft(self, temp):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, name='dense2'))
        model.add(Lambda(lambda x: x / temp))
        model.add(Activation('softmax'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Project: Theano-MPI | Author: uoguelph-mlrg
def build_model(self):


        img_input = Input(shape=(img_channels, img_rows, img_cols))

        # one conv at the beginning (spatial size: 32x32)
        x = ZeroPadding2D((1, 1))(img_input)
        x = Convolution2D(16, nb_row=3, nb_col=3)(x)

        # Stage 1 (spatial size: 32x32)
        x = bottleneck(x, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
        # Stage 2 (spatial size: 16x16)
        x = bottleneck(x, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
        # Stage 3 (spatial size: 8x8)
        x = bottleneck(x, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))

        x = BatchNormalization(mode=0, axis=1)(x)
        x = Activation('relu')(x)
        x = AveragePooling2D((8, 8), strides=(1, 1))(x)
        x = Flatten()(x)
        preds = Dense(nb_classes, activation='softmax')(x)

        self.model = Model(input=img_input, output=preds)

        self.keras_get_params()
Project: KAGGLE_CERVICAL_CANCER_2017 | Author: ZFTurbo
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
Project: tf-wgan | Author: kuleshov
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(Xk_d)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2,2),
        activation=None, border_mode='same', init='glorot_uniform',
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: tf-wgan | Author: kuleshov
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1)(Xk_g)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7)(x)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)
  x = Reshape((n_g_hid2, 7, 7))(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init='orthogonal', dim_ordering='th')(x)

  return g
Project: tf-wgan | Author: kuleshov
def make_dcgan_discriminator(Xk_d):
  x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(Xk_d)
  # x = BatchNormalization(mode=2, axis=1)(x) # <- makes things much worse!
  x = LeakyReLU(0.2)(x)

  x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2,2),
        activation=None, border_mode='same', init=conv2D_init,
        dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = LeakyReLU(0.2)(x)

  x = Flatten()(x)
  x = Dense(1024, init=conv2D_init)(x)
  x = BatchNormalization(mode=2)(x)
  x = LeakyReLU(0.2)(x)

  d = Dense(1, activation=None)(x)

  return d
Project: tf-wgan | Author: kuleshov
def make_dcgan_generator(Xk_g, n_lat, n_chan=1):
  n_g_hid1 = 1024 # size of hidden layer in generator layer 1
  n_g_hid2 = 128  # size of hidden layer in generator layer 2

  x = Dense(n_g_hid1, init=conv2D_init)(Xk_g)
  x = BatchNormalization(mode=2)(x)
  x = Activation('relu')(x)

  x = Dense(n_g_hid2*7*7, init=conv2D_init)(x)
  x = Reshape((n_g_hid2, 7, 7))(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  x = Deconvolution2D(64, 5, 5, output_shape=(128, 64, 14, 14), 
        border_mode='same', activation=None, subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)
  x = BatchNormalization(mode=2, axis=1)(x)
  x = Activation('relu')(x)

  g = Deconvolution2D(n_chan, 5, 5, output_shape=(128, n_chan, 28, 28), 
        border_mode='same', activation='sigmoid', subsample=(2,2), 
        init=conv2D_init, dim_ordering='th')(x)

  return g
Project: coremltools | Author: apple
def conv2d_bn(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1), name=None):
    """
    Utility function to apply conv + BN.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    bn_axis = 3
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation='relu',
                      border_mode=border_mode,
                      name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
    return x
Project: coremltools | Author: apple
def test_medium_conv_batchnorm_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 3)
        num_kernels = 3
        kernel_height = 5
        kernel_width = 5
        data_mean = 2
        data_var = 1

        # Define a model
        from keras.layers.normalization import BatchNormalization
        model = Sequential()
        model.add(Convolution2D(input_shape = input_shape,
            nb_filter = num_kernels, nb_row = kernel_height,
            nb_col = kernel_width))
        model.add(BatchNormalization(epsilon=1e-5))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model)
Project: coremltools | Author: apple
def test_tiny_mcrnn_music_tagger(self):

        x_in = Input(shape=(4,6,1))
        x = ZeroPadding2D(padding=(0, 1))(x_in)
        x = BatchNormalization(axis=2, name='bn_0_freq')(x)
        # Conv block 1
        x = Convolution2D(2, 3, 3, border_mode='same', name='conv1')(x)
        x = BatchNormalization(axis=3, mode=0, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        # Conv block 2
        x = Convolution2D(4, 3, 3, border_mode='same', name='conv2')(x)
        x = BatchNormalization(axis=3, mode=0, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

        # Should get you (1,1,2,4)
        x = Reshape((2, 4))(x)
        x = GRU(32, return_sequences=True, name='gru1')(x)
        x = GRU(32, return_sequences=False, name='gru2')(x)

        # Create model.
        model = Model(x_in, x)
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_keras_model(model, mode='random_zero_mean', delta=1e-2)
Project: keras-customized | Author: ambrite
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-customized | Author: ambrite
def test_shared_batchnorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchNormalization(input_shape=(10,), mode=0)
    x1 = Input(shape=(10,))
    bn(x1)

    x2 = Input(shape=(10,))
    y2 = bn(x2)

    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    assert len(model.updates) == 2
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)

    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    assert len(model.updates) == 2
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
Project: kaggle-allstate-claims-severity | Author: alno
def nn_mlp(input_shape, params):
    model = Sequential()

    for i, layer_size in enumerate(params['layers']):
        reg = regularizer(params)

        if i == 0:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg, input_shape=input_shape))
        else:
            model.add(Dense(layer_size, init='he_normal', W_regularizer=reg))

        if params.get('batch_norm', False):
            model.add(BatchNormalization())

        if 'dropouts' in params:
            model.add(Dropout(params['dropouts'][i]))

        model.add(PReLU())

    model.add(Dense(1, init='he_normal'))

    return model
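A hypothetical invocation (Keras 1 API; regularizer() is a project-local helper, and all values here are illustrative):

params = {'layers': [256, 128], 'dropouts': [0.4, 0.2], 'batch_norm': True}
model = nn_mlp(input_shape=(130,), params=params)
model.compile(optimizer='adadelta', loss='mae')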