Python keras.layers module: UpSampling2D() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how keras.layers.UpSampling2D() is used.
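
Before diving into the project examples, here is a minimal standalone sketch of the layer itself. This snippet is not taken from any of the projects below; it assumes a Keras 2 style API with the default channels_last data format. UpSampling2D has no trainable weights: it simply repeats the rows and columns of its input by the given size factors.

import numpy as np
from keras.models import Sequential
from keras.layers import UpSampling2D

# Build a one-layer model that doubles a 16x16 single-channel feature map to 32x32.
model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(16, 16, 1)))

x = np.arange(16 * 16, dtype='float32').reshape(1, 16, 16, 1)
y = model.predict(x)
print(y.shape)  # (1, 32, 32, 1): each input pixel is repeated over a 2x2 block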

Project: Fabrik    Author: Cloud-CV
def test_keras_import(self):
        # Upsample 1D
        model = Sequential()
        model.add(UpSampling1D(size=2, input_shape=(1, 16)))
        model.build()
        self.keras_param_test(model, 0, 2)
        # Upsample 2D
        model = Sequential()
        model.add(UpSampling2D(size=(2, 2), input_shape=(1, 16, 16)))
        model.build()
        self.keras_param_test(model, 0, 3)
        # Upsample 3D
        model = Sequential()
        model.add(UpSampling3D(size=(2, 2, 2), input_shape=(1, 16, 16, 16)))
        model.build()
        self.keras_param_test(model, 0, 4)


# ********** Pooling Layers **********
Project: Fabrik    Author: Cloud-CV
def upsample(layer, layer_in, layerId):
    upsampleMap = {
        '1D': UpSampling1D,
        '2D': UpSampling2D,
        '3D': UpSampling3D
    }
    out = {}
    layer_type = layer['params']['layer_type']
    if (layer_type == '1D'):
        size = layer['params']['size_w']
    elif (layer_type == '2D'):
        size = (layer['params']['size_h'], layer['params']['size_w'])
    else:
        size = (layer['params']['size_h'], layer['params']['size_w'],
                layer['params']['size_d'])
    out[layerId] = upsampleMap[layer_type](size=size)(*layer_in)
    return out


# ********** Pooling Layers **********
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
Project: enhance    Author: cdiazbas
def keepsize_256(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])

        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)

    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(4*n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)
Project: nuts-ml    Author: maet3608
def create_network():
    input_img = Input(shape=INPUT_SHAPE)

    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the representation is (4, 4, 8) i.e. 128-dimensional

    x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

    model = Model(input_img, decoded)
    model.compile(optimizer='adadelta', loss='binary_crossentropy')
    return KerasNetwork(model, 'weights_conv_autoencoder.hd5')
Project: coremltools    Author: apple
def test_tiny_conv_upsample_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        num_kernels = 3
        kernel_height = 5
        kernel_width = 5

        # Define a model
        model = Sequential()
        model.add(Conv2D(input_shape = input_shape, 
            filters = num_kernels, kernel_size = (kernel_height, kernel_width)))
        model.add(UpSampling2D(size = 2))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_keras_model(model)
Project: cyclegan_keras    Author: shadySource
def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128*scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64*scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale*2, num_id=3)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024*scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(x0, x)
Project: u-net    Author: yihui-he
def _up_block(block, mrge, nb_filters):
    up = merge([Convolution2D(2*nb_filters, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(block)), mrge], mode='concat', concat_axis=1)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)

    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)

    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)

    return conv


# http://arxiv.org/pdf/1512.03385v1.pdf
# 50 Layer resnet
Project: nn_playground    Author: DingKe
def build_generator(latent_size):
    cnn = Sequential()
    cnn.add(Dense(1024, input_dim=latent_size, activation='relu'))
    cnn.add(Dense(128 * 7 * 7, activation='relu'))
    cnn.add(Reshape((128, 7, 7)))

    # upsample to (..., 14, 14)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(256, 5, padding='same',
                   activation='relu', kernel_initializer='glorot_normal'))

    # upsample to (..., 28, 28)
    cnn.add(UpSampling2D(size=(2, 2)))
    cnn.add(Conv2D(128, 5, padding='same',
                   activation='relu', kernel_initializer='glorot_normal'))

    # take a channel axis reduction
    cnn.add(Conv2D(1, 2, padding='same',
                   activation='tanh', kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size,))

    fake_image = cnn(latent)

    return Model(inputs=latent, outputs=fake_image)
Project: latplan    Author: guicho271828
def build_decoder(self,input_shape):
        "this function did not converge well. sigh"
        data_dim = np.prod(input_shape)
        last_convolution = 1 + np.array(input_shape) // 4
        first_convolution = last_convolution * 4
        diff = tuple(first_convolution - input_shape)
        crop = [[0,0],[0,0]]
        for i in range(2):
            if diff[i] % 2 == 0:
                for j in range(2):
                    crop[i][j] = diff[i] // 2
            else:
                crop[i][0] = diff[i] // 2
                crop[i][1] = diff[i] // 2 + 1
        crop = ((crop[0][0],crop[0][1]),(crop[1][0],crop[1][1]))
        print(last_convolution,first_convolution,diff,crop)

        return [*([Dropout(self.parameters['dropout'])] if self.parameters['dropout_z'] else []),
                *[Dense(self.parameters['layer'], activation='relu', use_bias=False),
                  BN(),
                  Dropout(self.parameters['dropout']),],
                *[Dense(np.prod(last_convolution) * self.parameters['clayer'], activation='relu', use_bias=False),
                  BN(),
                  Dropout(self.parameters['dropout']),],
                Reshape((*last_convolution, self.parameters['clayer'])),
                *[UpSampling2D((2,2)),
                  Deconvolution2D(self.parameters['clayer'],(3,3), activation='relu',padding='same', use_bias=False),
                  BN(),
                  Dropout(self.parameters['dropout']),],
                *[UpSampling2D((2,2)),
                  Deconvolution2D(1,(3,3), activation='sigmoid',padding='same'),],
                Cropping2D(crop),
                Reshape(input_shape),]
Project: hipsternet    Author: wiseodd
def conv_autoencoder(X):
    X = X.reshape(X.shape[0], 28, 28, 1)

    inputs = Input(shape=(28, 28, 1))
    h = Conv2D(4, 3, 3, activation='relu', border_mode='same')(inputs)
    encoded = MaxPooling2D((2, 2))(h)
    h = Conv2D(4, 3, 3, activation='relu', border_mode='same')(encoded)
    h = UpSampling2D((2, 2))(h)
    outputs = Conv2D(1, 3, 3, activation='relu', border_mode='same')(h)

    model = Model(input=inputs, output=outputs)
    model.compile(optimizer='adam', loss='mse')
    model.fit(X, X, batch_size=64, nb_epoch=5)

    return model, Model(input=inputs, output=encoded)
Project: Fabrik    Author: Cloud-CV
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Upsample']}
        # Conv 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        inp = data(net['l1'], '', 'l1')['l1']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling1D')
        # Conv 2D
        net['l0']['connection']['output'].append('l0')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling2D')
        # Conv 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        inp = data(net['l2'], '', 'l2')['l2']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling3D')
Project: Deep-Learning-Experiments    Author: roatienza
def generator(self):
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        depth = 64+64+64+64
        dim = 7
        # In: 100
        # Out: dim x dim x depth
        self.G.add(Dense(dim*dim*depth, input_dim=100))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))
        self.G.add(Reshape((dim, dim, depth)))
        self.G.add(Dropout(dropout))

        # In: dim x dim x depth
        # Out: 2*dim x 2*dim x depth/2
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(int(depth/2), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(int(depth/4), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        self.G.add(Conv2DTranspose(int(depth/8), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        # Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
        self.G.add(Conv2DTranspose(1, 5, padding='same'))
        self.G.add(Activation('sigmoid'))
        self.G.summary()
        return self.G
Project: ssgan    Author: samrussell
def build_models(self, input_shape):
    middle_neurons = 10

    self.encoder = Sequential()
    self.encoder.add(Conv2D(64, (5, 5), strides=(2, 2), padding = 'same', input_shape=input_shape))
    self.encoder.add(Activation(selu))
    self.encoder.add(Conv2D(128, (5, 5), strides=(2, 2), padding = 'same'))
    self.encoder.add(Activation(selu))
    self.encoder.add(Flatten())
    self.encoder.add(Dense(middle_neurons))
    self.encoder.add(Activation('sigmoid'))
    self.encoder.summary()

    self.decoder = Sequential()
    self.decoder.add(Dense(7*7*128, input_shape=(middle_neurons,)))
    self.decoder.add(Activation(selu))
    if keras.backend.image_data_format() == 'channels_first':
        self.decoder.add(Reshape([128, 7, 7]))
    else:    
        self.decoder.add(Reshape([7, 7, 128]))
    self.decoder.add(UpSampling2D(size=(2, 2)))
    self.decoder.add(Conv2D(64, (5, 5), padding='same'))
    self.decoder.add(Activation(selu))
    self.decoder.add(UpSampling2D(size=(2, 2)))
    self.decoder.add(Conv2D(1, (5, 5), padding='same'))
    self.decoder.add(Activation('sigmoid'))
    self.decoder.summary()

    self.autoencoder = Sequential()
    self.autoencoder.add(self.encoder)
    self.autoencoder.add(self.decoder)
    self.autoencoder.compile(loss='mean_squared_error',
                                  optimizer=Adam(lr=1e-4),
                                  metrics=['accuracy'])
Project: enhance    Author: cdiazbas
def keepsize(nx, ny, noise, depth, activation='relu', n_filters=64, l2_reg=1e-7):
    """
    Deep residual network that keeps the size of the input throughout the whole network
    """

    def residual(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = BatchNormalization()(x)
        x = add([x, inputs])

        return x

    inputs = Input(shape=(nx, ny, 1))
    x = GaussianNoise(noise)(inputs)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x0 = Activation(activation)(x)

    x = residual(x0, n_filters)

    for i in range(depth-1):
        x = residual(x, n_filters)

    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = add([x, x0])

    # Upsampling for superresolution
    x = UpSampling2D()(x)
    x = ReflectionPadding2D()(x)
    x = Conv2D(n_filters, (3, 3), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
    x = Activation(activation)(x)

    final = Conv2D(1, (1, 1), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)

    return Model(inputs=inputs, outputs=final)
Project: keras-surgeon    Author: BenWhetton
def test_delete_channels_upsampling2d(channel_index, data_format):
    layer = UpSampling2D([2, 3], data_format=data_format)
    layer_test_helper_flatten_2d(layer, channel_index, data_format)
Project: image-segmentation-keras    Author: divamgupta
def Unet(nClasses, optimizer=None, input_width=360, input_height=480, nChannels=1):

    inputs = Input((nChannels, input_height, input_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)

    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses, 1, 1, activation='relu',border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses,input_height*input_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)


    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Project: coremltools    Author: apple
def test_upsample(self):
        """
        Test the conversion of 2D convolutional layer + upsample
        """
        from keras.layers import Convolution2D, UpSampling2D

        # Create a simple Keras model
        model = Sequential()
        model.add(Convolution2D(input_shape=(64, 64, 3), nb_filter=32,
            nb_row=5, nb_col=5))
        model.add(UpSampling2D(size = (2, 2)))
        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        self.assertItemsEqual(input_names,
               map(lambda x: x.name, spec.description.input))
        self.assertEquals(len(spec.description.output), len(output_names))
        self.assertItemsEqual(output_names,
               map(lambda x: x.name, spec.description.output))

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.convolution)
        layer_1 = layers[1]
        self.assertIsNotNone(layer_1.upsample)
Project: coremltools    Author: apple
def test_upsample(self):
        """
        Test the conversion of 2D convolutional layer + upsample
        """
        from keras.layers import Conv2D, UpSampling2D

        # Create a simple Keras model
        model = Sequential()
        model.add(Conv2D(input_shape=(64, 64, 3), filters=32,
            kernel_size=(5,5)))
        model.add(UpSampling2D(size = (2, 2)))
        input_names = ['input']
        output_names = ['output']
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField('neuralNetwork'))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        self.assertEqual(sorted(input_names),
               sorted(map(lambda x: x.name, spec.description.input)))
        self.assertEquals(len(spec.description.output), len(output_names))
        self.assertEqual(sorted(output_names),
               sorted(map(lambda x: x.name, spec.description.output)))

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.convolution)
        layer_1 = layers[1]
        self.assertIsNotNone(layer_1.upsample)
Project: coremltools    Author: apple
def test_upsample_layer_params(self):
        options = dict(
            size= [(2,2), (3,3), (4,4), (5,5)]
        )

        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        X = np.random.rand(1, *input_shape)

        # Define a function that tests a model
        def build_model(x):
            kwargs = dict(zip(options.keys(), x))
            model = Sequential()
            model.add(Conv2D(filters=5, kernel_size=(7,7), 
                      input_shape = input_shape))
            model.add(UpSampling2D(**kwargs))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param)
Project: CycleGAN-keras    Author: Shaofanl
def scaleup(input, ngf, kss, strides, padding):
#   x = Conv2DTranspose(ngf, kss, strides=strides, padding=padding)(input)

    # upsample + conv
    x = UpSampling2D(strides)(input)
    x = Conv2D(ngf, kss, padding=padding)(x)
    return x
Project: dsde-deep-learning    Author: broadinstitute
def build_conv_autoencoder(input_dim=(28, 28, 1)):
    input_img = Input(shape=input_dim)  # adapt this if using `channels_first` image data format

    x = Conv2D(64, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)

    # at this point the representation is (4, 4, 8) i.e. 128-dimensional

    x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    if input_dim[0] == 28:
        x = Conv2D(64, (3, 3), activation='relu')(x)
    else:
        x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)

    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(input_dim[2], (3, 3), activation='sigmoid', padding='same')(x)

    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    return autoencoder


# def build_lstm_autoencoder(timesteps, input_dim)
#   inputs = Input(shape=(timesteps, input_dim))
#   encoded = LSTM(latent_dim)(inputs)

#   decoded = RepeatVector(timesteps)(encoded)
#   decoded = LSTM(input_dim, return_sequences=True)(decoded)

#   sequence_autoencoder = Model(inputs, decoded)
#   encoder = Model(inputs, encoded)
#   return encoder, sequence_autoencoder
Project: pythontest    Author: gjq246
def generator(self):
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        depth = 64+64+64+64
        dim = 7
        # In: 100
        # Out: dim x dim x depth
        self.G.add(Dense(dim*dim*depth, input_dim=100))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))
        self.G.add(Reshape((dim, dim, depth)))
        self.G.add(Dropout(dropout))

        # In: dim x dim x depth
        # Out: 2*dim x 2*dim x depth/2
        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(int(depth/2), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        self.G.add(UpSampling2D())
        self.G.add(Conv2DTranspose(int(depth/4), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        self.G.add(Conv2DTranspose(int(depth/8), 5, padding='same'))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        # Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
        self.G.add(Conv2DTranspose(1, 5, padding='same'))
        self.G.add(Activation('sigmoid'))
        self.G.summary()
        return self.G
Project: Keras-GAN    Author: Shaofanl
def basic_gen(input_shape, img_shape, nf=128, scale=4, FC=[], use_upsample=False):
    dim, h, w = img_shape 

    img = Input(input_shape)
    x = img
    for fc_dim in FC: 
        x = Dense(fc_dim)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = Dense(nf*2**(scale-1)*(h/2**scale)*(w/2**scale))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((nf*2**(scale-1), h/2**scale, w/2**scale))(x)

    for s in range(scale-2, -1, -1):
        # upsampling can eliminate the checkerboard artifact
        # http://distill.pub/2016/deconv-checkerboard/
        if use_upsample:
            x = UpSampling2D()(x)
            x = Conv2D(nf*2**s, (3,3), padding='same')(x)
        else:
            x = Deconv2D(nf*2**s, (3, 3), strides=(2, 2), padding='same')(x) 
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    if use_upsample:
        x = UpSampling2D()(x)
        x = Conv2D(dim, (3, 3), padding='same')(x)
    else:
        x = Deconv2D(dim, (3, 3), strides=(2, 2), padding='same')(x) 

    x = Activation('tanh')(x)

    return Model(img, x)
Project: neural-style    Author: jayanthkoushik
def conv_layer(in_, nb_filter, filter_length, subsample=1, upsample=1, only_conv=False):
    if upsample != 1:
        out = UpSampling2D(size=(upsample, upsample))(in_)
    else:
        out = in_
    padding = int(np.floor(filter_length / 2))
    out = ReflectPadding2D((padding, padding))(out)
    out = Conv2D(nb_filter, filter_length, filter_length, subsample=(subsample, subsample), border_mode="valid")(out)
    if not only_conv:
        out = InstanceNormalization()(out)
        out = Activation("relu")(out)
    return out
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: alno
def unet(input_shapes, n_classes):
    inputs = Input(input_shapes['in'], name='in')
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(384, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(384, 3, 3, activation='relu', border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)

    return Model(input=inputs, output=conv10)
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: alno
def unet_ma(input_shapes, n_classes):
    in_M = Input(input_shapes['in_M'], name='in_M')
    in_A = Input(input_shapes['in_A'], name='in_A')

    inputs = merge([in_A, in_M], mode='concat', concat_axis=1)

    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)

    up7 = merge([UpSampling2D(size=(2, 2))(conv4), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)

    return Model(input=[in_M, in_A], output=conv10)
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: alno
def merge_block(conv, skip, mode='concat'):
    return merge([UpSampling2D(size=(2, 2))(conv), skip], mode=mode, concat_axis=1)
Project: qtim_ROP    Author: QTIM-Lab
def autoencoder(channels=3):

    input_img = Input(shape=(channels, 256, 256))

    x = Conv2D(32, 3, 3, activation='relu', border_mode='same')(input_img)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(16, 3, 3, activation='relu', border_mode='same')(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    encoded = MaxPooling2D((2, 2), border_mode='same')(x)

    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(32, 3, 3, activation='relu', border_mode='same')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(channels, 3, 3, activation='sigmoid', border_mode='same')(x)

    ae = Model(input_img, decoded)

    # sgd = SGD(lr=0.001, momentum=.9, decay=1e-3)
    ae.compile(optimizer='adadelta', loss='mse')
    return ae
Project: jamespy_py3    Author: jskDr
def modeling(self, input_shape, nb_classes):
        nb_filters = 8
        # size of pooling area for max pooling
        pool_size_l = [(4, 4), (4,4)] # 160 --> 40, 40 --> 10
        # convolution kernel size
        kernel_size = (20, 20)

        model = Sequential()

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid',
                                input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[0])) # 160 --> 40
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[1])) # 40 --> 10
        model.add(Dropout(0.25))

        model.add(UpSampling2D(pool_size_l[1])) # 10 --> 40
        #model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
        #                        border_mode='valid'))
        #model.add(Activation('relu'))
        #model.add(UpSampling2D(pool_size_2))
        #model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(4))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes))
        model.add(Activation('softmax'))

        return model
Project: jamespy_py3    Author: jskDr
def modeling_1(self, input_shape, nb_classes):
        print("Modeling_1")
        nb_filters = 8
        # size of pooling area for max pooling
        pool_size_l = [(4, 4), (4,4)] # 160 --> 40, 40 --> 10
        # convolution kernel size
        kernel_size = (20, 20)

        model = Sequential()

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid',
                                input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[0])) # 160 --> 40
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[1])) # 40 --> 10
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, 2, 2,
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(UpSampling2D(pool_size_l[1])) # 10 --> 40
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(4))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes))
        model.add(Activation('softmax'))

        return model
Project: jamespy_py3    Author: jskDr
def modeling_2(self, input_shape, nb_classes):
        print("Modeling_2")
        nb_filters = 8
        # size of pooling area for max pooling
        pool_size_l = [(4, 4), (4,4)] # 160 --> 40, 40 --> 10
        # convolution kernel size
        kernel_size = (20, 20)

        model = Sequential()

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid',
                                input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[0])) # 160 --> 40
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=pool_size_l[1])) # 40 --> 10
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, 2, 2,
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(UpSampling2D(pool_size_l[1])) # 10 --> 40
        model.add(Dropout(0.25))

        model.add(Convolution2D(nb_filters, 2, 2,
                                border_mode='valid'))
        model.add(Activation('relu'))
        model.add(UpSampling2D(pool_size_l[0])) # 40 --> 160
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(nb_classes))
        model.add(Activation('softmax'))

        return model
Project: jamespy_py3    Author: jskDr
def modeling(self):
        Lx, Ly = self.Lx, self.Ly
        input_img = Input(shape=(1, Lx, Ly))
        ks = 8

        x = Convolution2D(16, ks*2, ks*2, activation='relu', border_mode='same')(input_img)
        x = MaxPooling2D((2, 2), border_mode='same')(x)  # 160 --> 80
        x = Convolution2D(8, ks*2, ks*2, activation='relu', border_mode='same')(x)
        x = MaxPooling2D((2, 2), border_mode='same')(x)  # 80 --> 40
        x = Convolution2D(8, ks*2, ks*2, activation='relu', border_mode='same')(x)
        encoded = MaxPooling2D((2, 2), border_mode='same')(x) # 40 --> 20

        # at this point the representation is (8, 20, 20) 

        x = Convolution2D(8, ks, ks, activation='relu', border_mode='same')(encoded)
        x = UpSampling2D((2, 2))(x) # 20 --> 40
        x = Convolution2D(8, ks, ks, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x) # 40 --> 80
        x = Convolution2D(16, ks, ks, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x) # 80 --> 160
        decoded = Convolution2D(1, ks, ks, activation='sigmoid', border_mode='same')(x)

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

        self.autoencoder = autoencoder
Project: jamespy_py3    Author: jskDr
def modeling(self):
        input_img = Input(shape=(1, 28, 28))
        # set-1
        x = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(input_img)  # 16,28,28
        x = MaxPooling2D((2, 2), border_mode='same')(x)  # 16,14,14
        x = Dropout(0.25)(x)  # Use dropout after maxpooling

        # set-2
        x = Convolution2D(8, 3, 3, activation='relu',
                          border_mode='same')(x)  # 8,14,14
        x = MaxPooling2D((2, 2), border_mode='same')(x)  # 8,7,7
        x = Dropout(0.25)(x)  # Use dropout after maxpooling

        # set-3
        x = Convolution2D(8, 3, 3, activation='relu',
                          border_mode='same')(x)  # 8,7,7
        encoded = x

        x = Convolution2D(8, 3, 3, activation='relu',
                          border_mode='same')(encoded)  # 8,7,7
        # x = Dropout(0.25)(x) # Use dropout after maxpooling

        x = UpSampling2D((2, 2))(x)  # 8,14,14
        x = Convolution2D(8, 3, 3, activation='relu',
                          border_mode='same')(x)  # 8,14,14
        # x = Dropout(0.25)(x) # Use dropout after maxpooling

        x = UpSampling2D((2, 2))(x)  # 8, 28, 28
        x = Convolution2D(16, 3, 3, activation='relu',
                          border_mode='same')(x)  # 16, 28, 28
        # x = Dropout(0.25)(x) # Use dropout after maxpooling
        decoded = Convolution2D(
            1, 3, 3, activation='sigmoid', border_mode='same')(x)  # 1, 28, 28

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

        self.autoencoder = autoencoder
Project: Single_Image_SR    Author: sumedhpendurkar
def get_modified_vgg19():
    model = vgg19.VGG19(weights = 'imagenet', include_top = True)

    for x in range(20):
        model.layers.pop()
    x = UpSampling2D()(model.layers[-1].output)
    x = Deconv2D(64, (3,3), padding = 'same', activation = 'relu')(x)
    x = Deconv2D(3, (1,1), padding = 'same', activation = None)(x)
    mod = keras.models.Model(input = model.input, output = x)
    adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=3e-06)
    mod.compile(loss='mse', optimizer = adam)
    return mod
Project: TC-Lung_nodules_detection    Author: Shicoder
def get_unet():
    inputs = Input((1, img_rows, img_cols))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = BatchNormalization()(pool1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = BatchNormalization()(pool2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = BatchNormalization()(pool3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    pool4 = BatchNormalization()(pool4)

    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    conv6 = BatchNormalization()(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    conv7 = BatchNormalization()(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    conv8 = BatchNormalization()(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    conv9 = BatchNormalization()(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=Adam(lr=1.0e-4), loss=dice_coef_loss, metrics=[dice_coef])

    return model
Project: semantic-segmentation    Author: albertbuchard
def build_fcn(X):  
    #
    #   DESCRIPTION
    #       KERAS FCN DEFINITION
    #       Using the shape of the input to setup the input layer we create a FCN with 2 skips 
    #       
    #   INPUTS
    #       X [number_of_images, 400, 400, channels] 
    #
    #   OUTPUTS 
    #       model uninstantiated Keras model 
    #
    img_rows, img_cols = 400, 400
    inputs = Input(shape=X.shape[1:])
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 4, 4, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 4, 4, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3) # 50 50 

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv4)  # 25 25

    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv5)  
    drop3 = Dropout(0.5)(pool5) 

    convpool3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool3)
    convpool4 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(pool4)
    convdrop3 = Convolution2D(60, 1, 1, activation='relu', border_mode='same')(drop3)

    drop3x5 = UpSampling2D(size=(5, 5))(convdrop3)
    croppeddrop3x5 = Cropping2D(((5,5),(5,5)))(drop3x5) # 50 50
    pool4x2 = UpSampling2D(size=(2, 2))(convpool4) # 50 50
    fuse2 = merge([convpool3, pool4x2, croppeddrop3x5], mode='concat', concat_axis=-1) # 50 50 4224
    upscore3 = UpSampling2D(size=(8, 8))(fuse2) # F 8s 
    convscore3 = Convolution2D(1, 1, 1, activation='sigmoid')(upscore3)

    # Instantiate Model object 
    model = Model(input=inputs, output=convscore3)

    sgd = SGD(lr=1e-5, decay=2, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=pixel_wise_loss, metrics=['mean_squared_error'])  

    #model.compile(loss='mean_squared_error', optimizer=sgd)

    return model

## CUSTOM LOSS FUNCTION
Project: keras-contrib    Author: farizrahman4u
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4, block_prefix=None):
    '''Adds an upsampling block. The upsampling operation relies on the type parameter.

    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution)
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, nb_filter, rows * 2, cols * 2)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows * 2, cols * 2, nb_filter)` if data_format='channels_last'.

    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):

        if type == 'upsampling':
            x = UpSampling2D(name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                       use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D'))(ip)
            x = SubPixelUpscaling(scale_factor=2, name=name_or_none(block_prefix, '_subpixel'))(x)
            x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                       use_bias=False, kernel_initializer='he_normal', name=name_or_none(block_prefix, '_conv2D'))(x)
        else:
            x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                                kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix, '_conv2DT'))(ip)
        return x
Project: cancer    Author: yancz1989
def get_unet():
  inputs = Input((1, img_rows, img_cols))
  conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
  conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
  pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

  conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
  conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
  pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

  conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
  conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
  pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

  conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
  conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
  pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

  conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
  conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)

  up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
  conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
  conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

  up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
  conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
  conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

  up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
  conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
  conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

  up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
  conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

  conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

  model = Model(input=inputs, output=conv10)

  model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])

  return model
Project: cervix-roi-segmentation-by-unet    Author: scottykwok
def create_model(img_height, img_width, nb_channels, learning_rate):
    if K.image_dim_ordering() == 'th':
        channel_axis = 1
        inputs = Input((nb_channels, img_height, img_width))
    else:
        channel_axis = 3
        inputs = Input((img_height, img_width, nb_channels))
    print('K.image_dim_ordering={} Channel axis={}'.format(K.image_dim_ordering(), channel_axis))

    # inputs = Input((1, img_rows, img_cols))
    conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(inputs)
    conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(pool1)
    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), padding="same", activation="relu")(pool2)
    conv3 = Conv2D(128, (3, 3), padding="same", activation="relu")(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3), padding="same", activation="relu")(pool3)
    conv4 = Conv2D(256, (3, 3), padding="same", activation="relu")(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, (3, 3), padding="same", activation="relu")(pool4)
    conv5 = Conv2D(512, (3, 3), padding="same", activation="relu")(conv5)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=channel_axis)
    conv6 = Conv2D(256, (3, 3), padding="same", activation="relu")(up6)
    conv6 = Conv2D(256, (3, 3), padding="same", activation="relu")(conv6)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=channel_axis)
    conv7 = Conv2D(128, (3, 3), padding="same", activation="relu")(up7)
    conv7 = Conv2D(128, (3, 3), padding="same", activation="relu")(conv7)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=channel_axis)
    conv8 = Conv2D(64, (3, 3), padding="same", activation="relu")(up8)
    conv8 = Conv2D(64, (3, 3), padding="same", activation="relu")(conv8)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=channel_axis)
    conv9 = Conv2D(32, (3, 3), padding="same", activation="relu")(up9)
    conv9 = Conv2D(32, (3, 3), padding="same", activation="relu")(conv9)

    conv10 = Conv2D(nb_channels, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=learning_rate), loss=dice_coef_loss, metrics=[dice_coef])
    return model
Project: ssgan    Author: samrussell
def build_models(self, input_shape):
    self.discriminator = Sequential()
    # No built-in 'relu' on the conv layers, otherwise the LeakyReLU activations below would have no effect
    self.discriminator.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=input_shape))
    self.discriminator.add(LeakyReLU(0.2))
    self.discriminator.add(Dropout(0.5))
    self.discriminator.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    self.discriminator.add(LeakyReLU(0.2))
    self.discriminator.add(Dropout(0.5))
    self.discriminator.add(Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
    self.discriminator.add(LeakyReLU(0.2))
    self.discriminator.add(Dropout(0.5))
    # 7x7 for MNIST
    #H = Conv2D(512, (5, 5), strides=(2, 2), padding = 'same', activation='relu')(H)
    #H = LeakyReLU(0.2)(H)
    #H = Dropout(0.5)(H)
    self.discriminator.add(Flatten())
    self.discriminator.add(Dense(1+self.num_classes,activation='softmax'))
    self.discriminator.summary()

    self.generator = Sequential()
    self.generator.add(Dense(7*7*256, input_shape=(100,)))
    #self.generator.add(BatchNormalization())
    self.generator.add(Activation('relu'))
    if keras.backend.image_data_format() == 'channels_first':
        self.generator.add(Reshape([256, 7, 7]))
    else:    
        self.generator.add(Reshape([7, 7, 256]))
    self.generator.add(Dropout(0.5))
    self.generator.add(UpSampling2D(size=(2, 2)))
    self.generator.add(Conv2D(128, (5, 5), padding='same'))
    self.generator.add(BatchNormalization())
    self.generator.add(Activation('relu'))
    self.generator.add(Dropout(0.5))
    self.generator.add(UpSampling2D(size=(2, 2)))
    self.generator.add(Conv2D(64, (5, 5), padding='same'))
    self.generator.add(BatchNormalization())
    self.generator.add(Activation('relu'))
    # we're ignoring input shape - just assuming it's 7,7,1
    self.generator.add(Conv2D(1, (5, 5), padding='same'))
    self.generator.add(Activation('sigmoid'))
    self.generator.summary()

    self.real_image_model = Sequential()
    self.real_image_model.add(self.discriminator)
    self.real_image_model.compile(loss='categorical_crossentropy',
                                  optimizer=Adam(lr=1e-4),
                                  metrics=['accuracy'])

    self.fake_image_model = Sequential()
    self.fake_image_model.add(self.generator)
    self.discriminator.trainable = False
    self.fake_image_model.add(self.discriminator)
    self.fake_image_model.compile(loss='categorical_crossentropy',
                                  optimizer=Adam(lr=1e-4),
                                  metrics=['accuracy'])
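A note on the stacked models above: in Keras the trainable flag is captured when a model is compiled, so real_image_model (compiled before the freeze) still updates the discriminator, while fake_image_model (compiled after discriminator.trainable = False) only trains the generator. A minimal, self-contained sketch of that freeze-before-compile pattern (the tiny Dense models are placeholders, not the project's networks):

from keras.models import Sequential
from keras.layers import Dense

gen = Sequential([Dense(8, input_dim=4)])                           # stand-in generator
disc = Sequential([Dense(2, activation='softmax', input_dim=8)])    # stand-in discriminator
disc.compile(loss='categorical_crossentropy', optimizer='adam')     # compiled while trainable
disc.trainable = False                                              # affects models compiled from now on
stacked = Sequential([gen, disc])                                   # generator + frozen discriminator
stacked.compile(loss='categorical_crossentropy', optimizer='adam')  # updates only the generator weights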
项目:KAGGLE_CERVICAL_CANCER_2017    作者:ZFTurbo    | 项目源码 | 文件源码
def ZF_UNET_224(dropout_val=0.05, batch_norm=True):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    inputs = Input((3, 224, 224))
    conv1 = double_conv_layer(inputs, 32, dropout_val, batch_norm)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = double_conv_layer(pool1, 64, dropout_val, batch_norm)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = double_conv_layer(pool2, 128, dropout_val, batch_norm)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = double_conv_layer(pool3, 256, dropout_val, batch_norm)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = double_conv_layer(pool4, 512, dropout_val, batch_norm)
    pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)

    conv6 = double_conv_layer(pool5, 1024, dropout_val, batch_norm)

    up6 = merge([UpSampling2D(size=(2, 2))(conv6), conv5], mode='concat', concat_axis=1)
    conv7 = double_conv_layer(up6, 512, dropout_val, batch_norm)

    up7 = merge([UpSampling2D(size=(2, 2))(conv7), conv4], mode='concat', concat_axis=1)
    conv8 = double_conv_layer(up7, 256, dropout_val, batch_norm)

    up8 = merge([UpSampling2D(size=(2, 2))(conv8), conv3], mode='concat', concat_axis=1)
    conv9 = double_conv_layer(up8, 128, dropout_val, batch_norm)

    up9 = merge([UpSampling2D(size=(2, 2))(conv9), conv2], mode='concat', concat_axis=1)
    conv10 = double_conv_layer(up9, 64, dropout_val, batch_norm)

    up10 = merge([UpSampling2D(size=(2, 2))(conv10), conv1], mode='concat', concat_axis=1)
    conv11 = double_conv_layer(up10, 32, 0, batch_norm)

    conv12 = Convolution2D(1, 1, 1)(conv11)
    conv12 = BatchNormalization(mode=0, axis=1)(conv12)
    conv12 = Activation('sigmoid')(conv12)

    model = Model(input=inputs, output=conv12)
    return model
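ZF_UNET_224 only builds the graph, so the caller still has to compile it. A usage sketch assuming channels-first ('th') image ordering, which the fixed Input((3, 224, 224)) shape and concat_axis=1 require; the optimizer and loss below are illustrative, not the project's choices:

model = ZF_UNET_224(dropout_val=0.05, batch_norm=True)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()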
项目:LSTM-GRU-CNN-MLP    作者:ansleliu    | 项目源码 | 文件源码
def build_model():
    model = Sequential()

    # Conv block 1: 4 filters of size 5x5 on the single-channel 20x20 input
    model.add(Convolution2D(4, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, 20, 20)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))

    # Block 2: add Gaussian noise, upsample 2x, then a 16-filter 3x3 atrous convolution
    model.add(GaussianNoise(0.001))
    model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))

    model.add(AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Block 3: an 8-filter 3x3 atrous convolution
    model.add(AtrousConvolution2D(8, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('linear'))

    # Block 4: add Gaussian noise, then a 4-filter 3x3 atrous convolution
    model.add(GaussianNoise(0.002))
    model.add(AtrousConvolution2D(4, 3, 3, border_mode='valid', dim_ordering='th'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Dropout(0.2))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(AveragePooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Flatten the feature maps and map them to the output through dense layers
    model.add(Flatten())
    model.add(Dense(8))
    model.add(BatchNormalization())
    model.add(Activation('tanh'))

    model.add(Dense(1))
    model.add(Activation('linear'))

    start = time.time()

    # Alternative: SGD + momentum
    # The loss argument of model.compile is the objective (loss) function
    # sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="rmsprop") # mse kld # Nadam  rmsprop
    print "Compilation Time : ", time.time() - start
    return model
项目:LSTM-GRU-CNN-MLP    作者:ansleliu    | 项目源码 | 文件源码
def build_model():
    model = Sequential()

    # Conv block 1: 8 filters of size 5x5 on the single-channel 20x20 input
    model.add(Convolution2D(8, 5, 5, border_mode='valid', dim_ordering='th', input_shape=(1, 20, 20)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(GaussianNoise(0.001))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    model.add(Activation('tanh'))

    # Block 2: a 16-filter 3x3 atrous convolution (the noise/upsampling variant is left commented out)
    # model.add(GaussianNoise(0.001))
    # model.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
    model.add(AtrousConvolution2D(16, 3, 3, border_mode='valid', dim_ordering='th'))
    # model.add(ZeroPadding2D((1, 1)))
    model.add(Activation('tanh'))
    # model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='th'))
    # model.add(Activation('tanh'))

    # Flatten the feature maps and feed them into a dense layer
    model.add(Flatten())
    model.add(Dense(20))
    model.add(Activation('tanh'))

    # LSTM layers
    model.add(Reshape((20, 1)))
    model.add(LSTM(input_dim=1, output_dim=32, activation='tanh', inner_activation='tanh', return_sequences=True))
    model.add(GaussianNoise(0.01))
    model.add(LSTM(64, activation='tanh', inner_activation='tanh', return_sequences=False))
    model.add(Dropout(0.2))  # Dropout overfitting

    model.add(Dense(1))
    model.add(Activation('linear'))

    start = time.time()

    # Alternative: SGD + momentum
    # The loss argument of model.compile is the objective (loss) function
    # sgd = SGD(lr=0.08, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(loss="mse", optimizer=sgd)
    model.compile(loss="mse", optimizer="Nadam") # Nadam # rmsprop
    print "Compilation Time : ", time.time() - start
    return model
项目:keras-autoencoder    作者:Rentier    | 项目源码 | 文件源码
def __init__(self, h_in, w_in, dims):
        # Each MaxPooling2D (2,2) layer halves the image size
        resize_factor = len(dims)

        # The encoded representation has as many filters as the last conv layer
        filters_encoded = dims[-1][0]

        input_img = Input(shape=(1,h_in, w_in), name='EncoderIn')
        decoder_input = Input(shape=(filters_encoded, h_in // (2 ** resize_factor), w_in // (2 ** resize_factor)), name='DecoderIn')

        # Construct encoder layers
        encoded = input_img

        for i, (filters, rows, cols) in enumerate(dims):
            name = 'Conv{0}'.format(i)
            encoded = Convolution2D(filters, rows, cols, activation='relu', border_mode='same', name=name)(encoded)
            encoded = MaxPooling2D((2, 2), border_mode='same', name= 'MaxPool{0}'.format(i))(encoded)

        # Construct decoder layers
        # 'decoded' continues from the encoder's output, while 'decoder' starts from the standalone decoder_input
        decoded = encoded
        decoder = decoder_input
        for i, (filters, rows, cols) in enumerate(reversed(dims)):
            convlayer = Convolution2D(filters, rows, cols, activation='relu', border_mode='same', name='Deconv{0}'.format(i))
            decoded = convlayer(decoded)
            decoder = convlayer(decoder)

            upsample = UpSampling2D((2, 2), name='UpSampling{0}'.format(i))
            decoded = upsample(decoded)
            decoder = upsample(decoder)

        # Reduce to a single filter in the output layer; sigmoid keeps the output in the [0..1] range
        convlayer = Convolution2D(1, dims[0][1], dims[0][2], activation='sigmoid', border_mode='same')
        decoded = convlayer(decoded)
        decoder = convlayer(decoder)

        self.autoencoder = Model(input=input_img, output=decoded)
        self.encoder = Model(input=input_img, output=encoded)
        self.decoder = Model(input=decoder_input, output=decoder)

        self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
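The dims argument above is a list of (filters, rows, cols) tuples, one entry per conv + 2x2 pooling stage, so h_in and w_in must be divisible by 2 ** len(dims), and the (1, h_in, w_in) input shape implies Theano-style channels-first ordering. A hypothetical instantiation; the class name ConvAutoencoder is assumed, since only __init__ is shown:

ae = ConvAutoencoder(h_in=28, w_in=28, dims=[(16, 3, 3), (8, 3, 3)])
ae.autoencoder.summary()
ae.encoder.summary()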
项目:kaggle-dstl-satellite-imagery-feature-detection    作者:alno    | 项目源码 | 文件源码
def rnet1(input_shapes, n_classes):
    def conv(size, x):
        x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
        x = BatchNormalization(axis=1, mode=0)(x)
        x = PReLU(shared_axes=[2, 3])(x)
        return x

    def unet_block(sizes, inp):
        x = inp

        skips = []

        for sz in sizes[:-1]:
            x = conv(sz, x)
            skips.append(x)
            x = MaxPooling2D((2, 2))(x)

        x = conv(sizes[-1], x)

        for sz in reversed(sizes[:-1]):
            x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))

        return x

    def fcn_block(sizes, inp):
        x = inp

        for sz in sizes:
            x = conv(sz, x)

        return Dropout(0.2)(x)

    # Build pyramid of inputs
    inp0 = Input(input_shapes['in'], name='in')
    inp1 = AveragePooling2D((2, 2))(inp0)
    inp2 = AveragePooling2D((2, 2))(inp1)

    # Build outputs in resnet fashion
    out2 = unet_block([32, 48], inp2)
    #out2 = merge([unet_block([32, 48, 32], merge([inp2, out2], mode='concat', concat_axis=1)), out2], mode='sum')

    out1 = UpSampling2D((2, 2))(out2)
    #out1 = merge([unet_block([32, 32, 48], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')
    out1 = merge([unet_block([32, 48, 64], merge([inp1, out1], mode='concat', concat_axis=1)), out1], mode='sum')

    out0 = UpSampling2D((2, 2))(out1)
    out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')
    out0 = merge([unet_block([32, 48, 64], merge([inp0, out0], mode='concat', concat_axis=1)), out0], mode='sum')

    # Final convolution
    out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)

    return Model(input=inp0, output=out)
项目:kaggle-dstl-satellite-imagery-feature-detection    作者:alno    | 项目源码 | 文件源码
def rnet1_mi(input_shapes, n_classes):
    def conv(size, x):
        x = Convolution2D(size, 3, 3, border_mode='same', init='he_normal', bias=False)(x)
        x = BatchNormalization(axis=1, mode=0)(x)
        x = PReLU(shared_axes=[2, 3])(x)
        return x

    def unet_block(sizes, inp):
        x = inp

        skips = []

        for sz in sizes[:-1]:
            x = conv(sz, x)
            skips.append(x)
            x = MaxPooling2D((2, 2))(x)

        x = conv(sizes[-1], x)

        for sz in reversed(sizes[:-1]):
            x = conv(sz, merge([UpSampling2D((2, 2))(x), skips.pop()], mode='concat', concat_axis=1))

        return x

    def radd(out, inp, block):
        block_in = merge([inp, out], mode='concat', concat_axis=1)
        block_out = block(block_in)

        return merge([block_out, out], mode='sum')

    in_I = Input(input_shapes['in_I'], name='in_I')
    in_M = Input(input_shapes['in_M'], name='in_M')

    # Build pyramid of inputs
    inp0 = in_I
    inp1 = AveragePooling2D((2, 2))(inp0)
    inp2 = merge([AveragePooling2D((2, 2))(inp1), in_M], mode='concat', concat_axis=1)
    inp3 = AveragePooling2D((2, 2))(inp2)

    # Build outputs in resnet fashion
    out3 = unet_block([32, 48], inp3)

    out2 = UpSampling2D((2, 2))(out3)
    out2 = radd(out2, inp2, lambda x: unet_block([32, 48], x))

    out1 = UpSampling2D((2, 2))(out2)
    out1 = radd(out1, inp1, lambda x: unet_block([32, 48], x))
    out1 = radd(out1, inp1, lambda x: unet_block([32, 48, 64], x))

    out0 = UpSampling2D((2, 2))(out1)
    out0 = radd(out0, inp0, lambda x: unet_block([32, 48], x))
    out0 = radd(out0, inp0, lambda x: unet_block([32, 48, 64], x))

    # Final convolution
    out = Convolution2D(n_classes, 1, 1, activation='sigmoid')(out0)

    return Model(input=[in_I, in_M], output=out)
项目:kaggle-dstl-satellite-imagery-feature-detection    作者:alno    | 项目源码 | 文件源码
def unet_mi_2(input_shapes, n_classes):
    in_I = Input(input_shapes['in_I'], name='in_I')
    in_M = Input(input_shapes['in_M'], name='in_M')

    conv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(in_I)
    conv1 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(merge([pool2, in_M], mode='concat', concat_axis=1))
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)  # upsample conv6 so the bottleneck path is actually used
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(96, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(n_classes, 1, 1, activation='sigmoid')(conv9)

    return Model(input=[in_I, in_M], output=conv10)
项目:kaggle-dstl-satellite-imagery-feature-detection    作者:alno    | 项目源码 | 文件源码
def dnet1_mi(input_shapes, n_classes):
    def concat(xs):
        if len(xs) == 1:
            return xs[0]

        return merge(xs, mode='concat', concat_axis=1)

    def conv(k, s, x):
        return Convolution2D(k, s, s, border_mode='same', init='he_normal')(x)

    def dense_block(k, n, inp, append=False):
        outputs = [inp] if append else []

        for i in xrange(n):
            x = Convolution2D(k, 3, 3, border_mode='same', init='he_normal')(inp)
            x = BatchNormalization(axis=1, mode=0)(x)
            x = PReLU(shared_axes=[2, 3])(x)

            outputs.append(x)
            inp = concat([inp, x])

        return concat(outputs)

    def down_block(x):
        return MaxPooling2D((2, 2))(x)

    def up_block(x):
        return UpSampling2D(size=(2, 2))(x)

    inputs = dict([(name, Input(shape, name=name)) for name, shape in input_shapes.items()])

    # Downpath
    d0 = conv(32, 1, concat([inputs['in_I'], inputs['in_IF']]))

    c1 = dense_block(16, 2, d0, append=True)
    d1 = down_block(c1)

    c2 = dense_block(16, 3, d1, append=True)
    d2 = down_block(c2)

    c3 = dense_block(16, 4, concat([d2, inputs['in_M'], inputs['in_MI']]), append=True)
    d3 = down_block(c3)

    c4 = dense_block(16, 5, d3, append=True)
    d4 = down_block(c4)

    # Bottleneck
    c5 = dense_block(16, 6, d4, append=True)

    # Uppath
    u4 = dense_block(16, 10, concat([c4, up_block(c5)]))
    u3 = dense_block(16,  8, concat([c3, up_block(u4)]))
    u2 = dense_block(16,  6, concat([c2, up_block(u3)]))
    u1 = dense_block(16,  4, concat([c1, up_block(u2)]))

    out = Activation('sigmoid')(conv(n_classes, 1, u1))

    return Model(input=inputs.values(), output=out)
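A hypothetical call of dnet1_mi; the channel counts and the 64-pixel tile size below are assumptions, and the 'in_M'/'in_MI' inputs must sit at 1/4 of the 'in_I' resolution because they are concatenated only after two 2x2 poolings (shapes are channels-first, matching concat_axis=1):

input_shapes = {
    'in_I':  (3, 64, 64),
    'in_IF': (4, 64, 64),
    'in_M':  (8, 16, 16),
    'in_MI': (8, 16, 16),
}
model = dnet1_mi(input_shapes, n_classes=10)
model.summary()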