Python keras.backend module: set_image_data_format() example source code

The following 10 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.set_image_data_format().
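
Before the project examples, here is a minimal sketch of the pattern most of them follow: remember the current format with K.image_data_format(), switch it with K.set_image_data_format(), build and run a model under that setting, and restore the previous value afterwards. The helper name run_with_data_format and the layer/shape choices below are illustrative assumptions, not taken from any of the listed projects.

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense

def run_with_data_format(data_format):
    # Hypothetical helper: temporarily switch the global image data format,
    # build a tiny model under it, then restore the previous setting.
    prev_format = K.image_data_format()
    K.set_image_data_format(data_format)  # 'channels_first' or 'channels_last'
    try:
        if data_format == 'channels_first':
            input_shape = (3, 32, 32)   # (channels, height, width)
        else:
            input_shape = (32, 32, 3)   # (height, width, channels)
        model = Sequential()
        model.add(Conv2D(8, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam')
        x = np.random.random((4,) + input_shape)
        return model.predict(x)
    finally:
        K.set_image_data_format(prev_format)  # always restore the previous format

# e.g. run_with_data_format('channels_last')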

Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_DSSIM_channels_last():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_last')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [input_dim, input_dim, 3]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_DSSIM_channels_first():
    prev_data = K.image_data_format()
    K.set_image_data_format('channels_first')
    for input_dim, kernel_size in zip([32, 33], [2, 3]):
        input_shape = [3, input_dim, input_dim]
        X = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)
        y = np.random.random_sample(4 * input_dim * input_dim * 3).reshape([4] + input_shape)

        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        model.add(Conv2D(3, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        model.compile(loss=DSSIMObjective(kernel_size=kernel_size), metrics=['mse'], optimizer=adam)
        model.fit(X, y, batch_size=2, epochs=1, shuffle='batch')

        # Test same
        x1 = K.constant(X, 'float32')
        x2 = K.constant(X, 'float32')
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.0, K.eval(dssim(x1, x2)), atol=1e-4)

        # Test opposite
        x1 = K.zeros([4] + input_shape)
        x2 = K.ones([4] + input_shape)
        dssim = DSSIMObjective(kernel_size=kernel_size)
        assert_allclose(0.5, K.eval(dssim(x1, x2)), atol=1e-4)

    K.set_image_data_format(prev_data)
Project: kfs    Author: the-moliver    | project source | file source
def test_smoke_channels_first():
    K.set_image_data_format('channels_first')
    _test_smoke('channels_first')
Project: kfs    Author: the-moliver    | project source | file source
def test_smoke_channels_last():
    K.set_image_data_format('channels_last')
    _test_smoke('channels_last')
Project: kfs    Author: the-moliver    | project source | file source
def test_equivalence_channels_first():
    K.set_image_data_format('channels_first')
    _test_equivalence('channels_first')
Project: kfs    Author: the-moliver    | project source | file source
def test_equivalence_channels_last():
    K.set_image_data_format('channels_last')
    _test_equivalence('channels_last')
Project: panotti    Author: drscotthawley    | project source | file source
def MyCNN_Keras2(X, nb_classes, nb_layers=4):
    from keras import backend as K
    K.set_image_data_format('channels_first')

    nb_filters = 32  # number of convolutional filters = "feature maps"
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)  # size of pooling area for max pooling
    cl_dropout = 0.5    # conv. layer dropout
    dl_dropout = 0.8    # dense layer dropout

    channels = X.shape[1]   # channels = 1 for mono, 2 for stereo

    print(" MyCNN_Keras2: X.shape = ",X.shape,", channels = ",channels)
    input_shape = (channels, X.shape[2], X.shape[3])
    model = Sequential()
    # Keras 2 uses padding= here; border_mode= was the Keras 1 keyword
    model.add(Conv2D(nb_filters, kernel_size, padding='valid', input_shape=input_shape))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))

    for layer in range(nb_layers-1):   # add more layers than just the first
        model.add(Conv2D(nb_filters, kernel_size))
        model.add(BatchNormalization(axis=1))
        model.add(ELU(alpha=1.0))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(cl_dropout))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(dl_dropout))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
Project: keras-transfer-learning-for-oxford102    Author: Arsey    | project source | file source
def set_img_format():
    try:
        # Keras 2 API: channels-first suits Theano, channels-last suits TensorFlow
        if K.backend() == 'theano':
            K.set_image_data_format('channels_first')
        else:
            K.set_image_data_format('channels_last')
    except AttributeError:
        # Fallback for Keras 1, which exposed set_image_dim_ordering('th'/'tf') instead
        if K._BACKEND == 'theano':
            K.set_image_dim_ordering('th')
        else:
            K.set_image_dim_ordering('tf')
Project: keras-fcn    Author: JihongJu    | project source | file source
def test_vgg16():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG16(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG16(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight)
Project: keras-fcn    Author: JihongJu    | project source | file source
def test_vgg19():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG19(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight)