Python keras.layers.core module: SpatialDropout2D() code examples

We extracted the following 7 code examples from open-source Python projects to illustrate how to use keras.layers.core.SpatialDropout2D().
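Before the project examples, a minimal sketch of what the layer does: unlike plain Dropout, SpatialDropout2D zeroes out entire feature maps rather than individual activations, which helps when adjacent pixels within a map are strongly correlated, as is typical early in convolutional networks. The shapes below are illustrative, and the Keras 2 API is assumed:

from keras.models import Sequential
from keras.layers import Conv2D, SpatialDropout2D

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3)))  # output shape: (62, 62, 32)
model.add(SpatialDropout2D(0.5))  # during training, drops whole 62x62 feature maps with p=0.5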

Project: sc2_predictor    Author: hellno
from keras.models import Sequential
from keras.layers import Convolution2D, Activation, MaxPooling2D, SpatialDropout2D, Flatten, Dense, Dropout


def get_model(shape, dropout=0.5, path=None):
    print('building neural network')

    model = Sequential()

    model.add(Convolution2D(512, 3, 3, border_mode='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(SpatialDropout2D(dropout))

    model.add(Flatten())
    # model.add(Dense(4096))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(1))
    #model.add(Activation('linear'))

    return model
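A minimal usage sketch for get_model, assuming the Keras 1 environment its Convolution2D/border_mode API implies, with a TensorFlow backend and channels-last ordering; the input shape and the regression-style compile settings are illustrative guesses, chosen because the head is a single Dense(1) with no activation:

model = get_model(shape=(100, 100, 3))
model.compile(optimizer='adam', loss='mse')  # assumed objective, not from the source
model.summary()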
Project: coremltools    Author: apple
def test_tiny_conv_dropout_random(self):
        np.random.seed(1988)
        num_samples = 1
        input_dim = 8
        input_shape = (input_dim, input_dim, 3)
        num_kernels = 2
        kernel_height = 5
        kernel_width = 5
        hidden_dim = 4

        # Define a model
        model = Sequential()
        model.add(Conv2D(input_shape=input_shape,
                         filters=num_kernels,
                         kernel_size=(kernel_height, kernel_width)))
        model.add(SpatialDropout2D(0.5))
        model.add(Flatten())
        model.add(Dense(hidden_dim))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model)
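The _test_keras_model helper is internal to the coremltools test suite; it converts the model and compares predictions against Keras. A standalone sketch of the conversion step it wraps, assuming the legacy Keras converter API (dropout layers are training-only, so the converter treats them as inference-time no-ops):

import coremltools

mlmodel = coremltools.converters.keras.convert(model)  # `model` as built above
mlmodel.save('tiny_conv_dropout.mlmodel')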
Project: keras-customized    Author: ambrite
def test_dropout():
    layer_test(core.Dropout,
               kwargs={'p': 0.5},
               input_shape=(3, 2))

    layer_test(core.SpatialDropout1D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4))

    layer_test(core.SpatialDropout2D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5))

    layer_test(core.SpatialDropout3D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5, 6))
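The layer_test helper used by the test_dropout excerpts here and below is Keras's own test utility, and the p keyword is the Keras 1 name for the dropout rate (renamed rate in Keras 2). A sketch of the imports these excerpts assume:

from keras.layers import core
from keras.utils.test_utils import layer_test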
Project: keras    Author: NVIDIA
def test_dropout():
    layer_test(core.Dropout,
               kwargs={'p': 0.5},
               input_shape=(3, 2))

    layer_test(core.Dropout,
               kwargs={'p': 0.5, 'noise_shape': [3, 1]},
               input_shape=(3, 2))

    layer_test(core.SpatialDropout1D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4))

    layer_test(core.SpatialDropout2D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5))

    layer_test(core.SpatialDropout3D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5, 6))
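The extra noise_shape case in this fork exercises the mechanism the SpatialDropout layers are built on: the mask has shape (3, 1), so each row gets a single Bernoulli draw that is broadcast across the second axis. SpatialDropout2D does the same for images, using a (batch, 1, 1, channels) mask under channels-last ordering so each feature map is kept or dropped as a whole. A hand-written equivalent in the style of these tests would be:

layer_test(core.Dropout,
           kwargs={'p': 0.5, 'noise_shape': [2, 1, 1, 5]},
           input_shape=(2, 3, 4, 5))  # one draw per (sample, channel) pair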
Project: keras    Author: GeekLiB
def test_dropout():
    layer_test(core.Dropout,
               kwargs={'p': 0.5},
               input_shape=(3, 2))

    layer_test(core.SpatialDropout1D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4))

    layer_test(core.SpatialDropout2D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5))

    layer_test(core.SpatialDropout3D,
               kwargs={'p': 0.5},
               input_shape=(2, 3, 4, 5, 6))
Project: enet-keras    Author: PavlosMelissinos
from keras.layers import (Conv2D, BatchNormalization, PReLU, MaxPooling2D,
                          Permute, ZeroPadding2D, SpatialDropout2D, add)


def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
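The Permute/ZeroPadding2D pair in the shortcut branch is a channel-padding trick: ZeroPadding2D only pads spatial axes, so the code swaps the channel axis into a spatial position, zero-pads it up to `output` channels, and swaps it back, letting the thin shortcut be added to the wider main branch. A minimal usage sketch for the block, with hypothetical shapes modeled on the early ENet encoder stages:

from keras.layers import Input

x = Input(shape=(256, 256, 64))
y = bottleneck(x, 128, downsample=True, dropout_rate=0.01)  # halves resolution, widens to 128 channels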
Project: enet-keras    Author: PavlosMelissinos
from keras.layers import (Conv2D, BatchNormalization, PReLU, Permute,
                          ZeroPadding2D, SpatialDropout2D, add)
# MaxPoolingWithArgmax2D is a custom pooling layer defined elsewhere in enet-keras.


def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
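This variant pairs with an unpooling decoder: when downsampling, it also returns the pooling argmax indices from the custom MaxPoolingWithArgmax2D layer so the decoder can restore spatial detail during unpooling. A sketch of how the two return signatures are consumed, with hypothetical shapes:

from keras.layers import Input

x = Input(shape=(256, 256, 64))
y, argmax_indices = bottleneck(x, 128, downsample=True)  # two outputs when downsampling
y = bottleneck(y, 128)                                   # one output otherwise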