Python keras.backend module: eval() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.eval().
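Before the project examples, here is a minimal, self-contained sketch of what keras.backend.eval() does: it evaluates a backend tensor (for example one created with K.variable or K.constant) and returns the result as a NumPy array. The values below are purely illustrative.

import numpy as np
from keras import backend as K

# Wrap a numpy array as a backend tensor, build a symbolic
# expression on it, then pull the result back out with K.eval.
x = K.variable(np.array([[1.0, 2.0], [3.0, 4.0]]))
y = K.eval(x * 2 + 1)
print(y)          # [[3. 5.] [7. 9.]]
print(type(y))    # <class 'numpy.ndarray'>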

Project: deeppavlov    Author: deepmipt    | project source | file source
def update(self, batch):
        x, y = batch
        y = np.array(y)
        y_pred = None

        if self.model_type == 'nn':
            self.train_loss, self.train_acc = self.model.train_on_batch(x, y)
            y_pred = self.model.predict_on_batch(x).reshape(-1)
            self.train_auc = roc_auc_score(y, y_pred)

        if self.model_type == 'ngrams':
            x = vectorize_select_from_data(x, self.vectorizers, self.selectors)
            self.model.fit(x, y.reshape(-1))
            y_pred = np.array(self.model.predict_proba(x)[:,1]).reshape(-1)
            y_pred_tensor = K.constant(y_pred, dtype='float64')
            self.train_loss = K.eval(binary_crossentropy(y.astype('float'), y_pred_tensor))
            self.train_acc = K.eval(binary_accuracy(y.astype('float'), y_pred_tensor))
            self.train_auc = roc_auc_score(y, y_pred)
        self.updates += 1
        return y_pred
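The 'ngrams' branch above shows a pattern that recurs throughout these examples: lift NumPy arrays into backend tensors with K.constant (or K.variable) so that Keras loss and metric functions can be evaluated outside a training loop via K.eval. A stripped-down sketch of that pattern, assuming Keras 2's module layout (keras.losses / keras.metrics) and illustrative values:

import numpy as np
from keras import backend as K
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy

y_true = K.constant(np.array([1.0, 0.0, 1.0, 1.0]), dtype='float64')
y_pred = K.constant(np.array([0.9, 0.2, 0.6, 0.4]), dtype='float64')

# Both functions return backend tensors; K.eval materializes them.
loss = K.eval(binary_crossentropy(y_true, y_pred))
acc = K.eval(binary_accuracy(y_true, y_pred))
print('loss:', float(loss), 'acc:', float(acc))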
Project: keras    Author: GeekLiB    | project source | file source
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_sub_pixel_upscaling():
    num_samples = 2
    num_row = 16
    num_col = 16
    input_dtype = K.floatx()

    for scale_factor in [2, 3, 4]:
        input_data = np.random.random((num_samples, 4 * (scale_factor ** 2), num_row, num_col))
        input_data = input_data.astype(input_dtype)

        if K.image_data_format() == 'channels_last':
            input_data = input_data.transpose((0, 2, 3, 1))

        input_tensor = K.variable(input_data)
        expected_output = K.eval(KC.depth_to_space(input_tensor,
                                                   scale=scale_factor))

        layer_test(convolutional.SubPixelUpscaling,
                   kwargs={'scale_factor': scale_factor},
                   input_data=input_data,
                   expected_output=expected_output,
                   expected_output_dtype=K.floatx())
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_instancenorm_correctness_rank1():
    # make sure it works with rank1 input tensor (batched)
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,), axis=None)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
    model.fit(x, x, epochs=4, verbose=0)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)

    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_instancenorm_perinstancecorrectness():
    model = Sequential()
    norm = normalization.InstanceNormalization(input_shape=(10,))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # bimodal distribution
    z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
    x = np.append(z, y)
    x = np.reshape(x, (4, 10))
    model.fit(x, x, epochs=4, batch_size=4, verbose=1)
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)

    # verify that each instance in the batch is individually normalized
    for i in range(4):
        instance = out[i]
        assert_allclose(instance.mean(), 0.0, atol=1e-1)
        assert_allclose(instance.std(), 1.0, atol=1e-1)

    # if each instance is normalized, so should the batch
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_batchrenorm_mode_0_or_2():
    for training in [1, 0, None]:
        ip = Input(shape=(10,))
        norm_m0 = normalization.BatchRenormalization(momentum=0.8)
        out = norm_m0(ip, training=training)
        model = Model(ip, out)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, epochs=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
Project: Deep-Learning-Plugin    Author: flowjo-lakes    | project source | file source
def checkScale(targetSample, outputSample, scale,  nIters = 3, batchSize = 1000):
    mmd_TT = np.zeros(nIters) 
    mmd_OT = np.zeros(nIters)
    #ratios = np.zeros(nIters)     
    for i in range(nIters):    
        T = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        T1 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        T2 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        O = outputSample[np.random.randint(outputSample.shape[0], size=batchSize),:]
        mmd_TT[i] = K.eval(cf.MMD(T1,T2, scales=[scale]).cost(K.variable(value=T1), K.variable(value=T2)))
        mmd_OT[i] = K.eval(cf.MMD(T,O, scales=[scale]).cost(K.variable(value=T), K.variable(value=O)))
        #ratios[i] = (mmd_OT[i] - mmd_TT[i])/ mmd_OT[i]
    print('scale: ' + str(scale))
    print('mmd_TT: ' + str(np.mean(mmd_TT)))
    print('mmd_OT: ' + str(np.mean(mmd_OT)))
    ratio = (np.mean(mmd_OT) - np.mean(mmd_TT)) / np.mean(mmd_OT)
    print('ratio: ' + str(ratio))
    return np.mean(mmd_TT), np.mean(mmd_OT), ratio
Project: attention-sum-reader    Author: cairoHy    | project source | file source
def preprocess_input_sequences(self, data, shuffle=True):
        """
        Preprocess the input sequences:
        shuffle the data (optional),
        PAD/TRUNC questions and documents to fixed lengths,
        and build y_true as a one-hot array of length self.A_len with index=0 marking the correct answer.
        """
        documents, questions, answer, candidates = self.union_shuffle(data) if shuffle else data
        d_lens = [len(i) for i in documents]

        questions_ok = pad_sequences(questions, maxlen=self.q_len, dtype="int32", padding="post", truncating="post")
        documents_ok = pad_sequences(documents, maxlen=self.d_len, dtype="int32", padding="post", truncating="post")
        context_mask = K.eval(tf.sequence_mask(d_lens, self.d_len, dtype=tf.float32))
        candidates_ok = pad_sequences(candidates, maxlen=self.A_len, dtype="int32", padding="post", truncating="post")
        y_true = np.zeros_like(candidates_ok)
        y_true[:, 0] = 1
        return questions_ok, documents_ok, context_mask, candidates_ok, y_true
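The K.eval() call here materializes the mask produced by tf.sequence_mask as a NumPy array. A minimal sketch, assuming the TensorFlow backend and illustrative lengths:

import tensorflow as tf
from keras import backend as K

d_lens = [2, 4, 1]   # true (unpadded) document lengths
# Rows are documents, columns are positions up to maxlen=5;
# 1.0 marks real tokens, 0.0 marks padding.
mask = K.eval(tf.sequence_mask(d_lens, 5, dtype=tf.float32))
print(mask)
# [[1. 1. 0. 0. 0.]
#  [1. 1. 1. 1. 0.]
#  [1. 0. 0. 0. 0.]]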
Project: pic2vec    Author: datarobot    | project source | file source
def test_downsample_model_features():
    """
    Test creates a toy numpy array, and checks that the method
    correctly downsamples the array into a hand-checked tensor
    """
    # Create the spliced and averaged tensor via downsampling function
    array = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                      [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                      [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
                      ])
    tensor = K.variable(array)

    x = _downsample_model_features(tensor, 5)

    # Create the spliced and averaged tensor by hand
    check_array = np.array([[1.5, 3.5, 5.5, 7.5, 9.5],
                            [11.5, 13.5, 15.5, 17.5, 19.5],
                            [21.5, 23.5, 25.5, 27.5, 29.5]
                            ])
    check_tensor = K.variable(check_array)
    # Check that they are equal: that it returns the correct tensor
    assert np.allclose(K.eval(check_tensor), K.eval(x), atol=ATOL)
Project: keras-customized    Author: ambrite    | project source | file source
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras-customized    Author: ambrite    | project source | file source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
Project: keras-customized    Author: ambrite    | project source | file source
def check_two_tensor_operation(function_name, x_input_shape,
                               y_input_shape, **kwargs):
    xval = np.random.random(x_input_shape) - 0.5

    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random(y_input_shape) - 0.5

    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Project: keras-customized    Author: ambrite    | project source | file source
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    ''' Creates a random tensor t0 with shape input_shape and compute
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both Theano and TensorFlow backends and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Project: keras-customized    Author: ambrite    | project source | file source
def test_shape_operations(self):
        # concatenate
        xval = np.random.random((4, 3))
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        yval = np.random.random((4, 2))
        yth = KTH.variable(yval)
        ytf = KTF.variable(yval)
        zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
        ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-05)

        check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
        check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                      pattern=(2, 0, 1))
        check_single_tensor_operation('repeat', (4, 1), n=3)
        check_single_tensor_operation('flatten', (4, 1))
        check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
        check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
        check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
        check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
        check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                         'squeeze', {'axis': 2},
                                         (4, 3, 1, 1))
Project: keras-customized    Author: ambrite    | project source | file source
def test_sparse_dot(self):
        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
        x_dense = x_sparse.toarray()

        W = np.random.random((5, 4))

        backends = [KTF]
        if KTH.th_sparse_module:
            # Theano has some dependency issues for sparse
            backends.append(KTH)

        for K in backends:
            t_W = K.variable(W)
            k_s = K.eval(K.dot(K.variable(x_sparse), t_W))
            k_d = K.eval(K.dot(K.variable(x_dense), t_W))

            assert k_s.shape == k_d.shape
            assert_allclose(k_s, k_d, atol=1e-05)
Project: keras-customized    Author: ambrite    | project source | file source
def test_arange(self):
        for test_value in (-20, 0, 1, 10):
            t_a = KTF.arange(test_value)
            a = KTF.eval(t_a)
            assert np.array_equal(a, np.arange(test_value))
            t_b = KTH.arange(test_value)
            b = KTH.eval(t_b)
            assert np.array_equal(b, np.arange(test_value))
            assert np.array_equal(a, b)
            assert KTF.dtype(t_a) == KTH.dtype(t_b)
        for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):
            a = KTF.eval(KTF.arange(start, stop, step))
            assert np.array_equal(a, np.arange(start, stop, step))
            b = KTH.eval(KTH.arange(start, stop, step))
            assert np.array_equal(b, np.arange(start, stop, step))
            assert np.array_equal(a, b)
        for dtype in ('int32', 'int64', 'float32', 'float64'):
            for backend in (KTF, KTH):
                t = backend.arange(10, dtype=dtype)
                assert backend.dtype(t) == dtype
Project: BatchEffectRemoval    Author: ushaham    | project source | file source
def checkScale(targetSample, outputSample, scale,  nIters = 3, batchSize = 1000):
    mmd_TT = np.zeros(nIters) 
    mmd_OT = np.zeros(nIters)
    #ratios = np.zeros(nIters)     
    for i in range(nIters):    
        T = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        T1 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        T2 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize),:]
        O = outputSample[np.random.randint(outputSample.shape[0], size=batchSize),:]
        mmd_TT[i] = K.eval(cf.MMD(T1,T2, scales=[scale]).cost(K.variable(value=T1), K.variable(value=T2)))
        mmd_OT[i] = K.eval(cf.MMD(T,O, scales=[scale]).cost(K.variable(value=T), K.variable(value=O)))
        #ratios[i] = (mmd_OT[i] - mmd_TT[i])/ mmd_OT[i]
    print('scale: ' + str(scale))
    print('mmd_TT: ' + str(np.mean(mmd_TT)))
    print('mmd_OT: ' + str(np.mean(mmd_OT)))
    ratio = (np.mean(mmd_OT) - np.mean(mmd_TT)) / np.mean(mmd_OT)
    print('ratio: ' + str(ratio))
    return np.mean(mmd_TT), np.mean(mmd_OT), ratio
Project: devise-keras    Author: priyamtejaswin    | project source | file source
def TEST_lossfun():
    ''' Test the hinge-rank loss function in model.py '''
    # data 
    # 1 correct word , 2 wrong ones
    image_vectors = np.random.rand(3, WORD_DIM)
    word_vectors  = np.random.rand(3, WORD_DIM)
    image_vectors = K.variable(image_vectors)
    word_vectors  = K.variable(word_vectors)

    # import module 
    from model import hinge_rank_loss

    # test
    out = K.eval(hinge_rank_loss(word_vectors, image_vectors, TESTING=True))
    print(out)

    print("Completed TEST_lossfun")
Project: keras    Author: NVIDIA    | project source | file source
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
Project: keras    Author: NVIDIA    | project source | file source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
Project: keras    Author: NVIDIA    | project source | file source
def check_two_tensor_operation(function_name, x_input_shape,
                               y_input_shape, **kwargs):
    xval = np.random.random(x_input_shape) - 0.5

    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)

    yval = np.random.random(y_input_shape) - 0.5

    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)

    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Project: keras    Author: NVIDIA    | project source | file source
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    ''' Creates a random tensor t0 with shape input_shape and compute
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both Theano and TensorFlow backends and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
Project: keras    Author: NVIDIA    | project source | file source
def test_repeat_elements(self):
        reps = 3
        for ndims in [1, 2, 3]:
            shape = np.arange(2, 2 + ndims)
            arr = np.arange(np.prod(shape)).reshape(shape)
            arr_th = KTH.variable(arr)
            arr_tf = KTF.variable(arr)

            for rep_axis in range(ndims):
                np_rep = np.repeat(arr, reps, axis=rep_axis)
                th_rep = KTH.eval(
                    KTH.repeat_elements(arr_th, reps, axis=rep_axis))
                tf_rep = KTF.eval(
                    KTF.repeat_elements(arr_tf, reps, axis=rep_axis))

                assert th_rep.shape == np_rep.shape
                assert tf_rep.shape == np_rep.shape
                assert_allclose(np_rep, th_rep, atol=1e-05)
                assert_allclose(np_rep, tf_rep, atol=1e-05)
Project: keras    Author: NVIDIA    | project source | file source
def test_sparse_dot(self):
        x_d = np.array([0, 7, 2, 3], dtype=np.float32)
        x_r = np.array([0, 2, 2, 3], dtype=np.int64)
        x_c = np.array([4, 3, 2, 3], dtype=np.int64)

        x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
        x_dense = x_sparse.toarray()

        W = np.random.random((5, 4))

        backends = [KTF]
        if KTH.th_sparse_module:
            # Theano has some dependency issues for sparse
            backends.append(KTH)

        for K in backends:
            t_W = K.variable(W)
            k_s = K.eval(K.dot(K.variable(x_sparse), t_W))
            k_d = K.eval(K.dot(K.variable(x_dense), t_W))

            assert k_s.shape == k_d.shape
            assert_allclose(k_s, k_d, atol=1e-05)
Project: keras    Author: NVIDIA    | project source | file source
def test_arange(self):
        for test_value in (-20, 0, 1, 10):
            t_a = KTF.arange(test_value)
            a = KTF.eval(t_a)
            assert np.array_equal(a, np.arange(test_value))
            t_b = KTH.arange(test_value)
            b = KTH.eval(t_b)
            assert np.array_equal(b, np.arange(test_value))
            assert np.array_equal(a, b)
            assert KTF.dtype(t_a) == KTH.dtype(t_b)
        for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):
            a = KTF.eval(KTF.arange(start, stop, step))
            assert np.array_equal(a, np.arange(start, stop, step))
            b = KTH.eval(KTH.arange(start, stop, step))
            assert np.array_equal(b, np.arange(start, stop, step))
            assert np.array_equal(a, b)
        for dtype in ('int32', 'int64', 'float32', 'float64'):
            for backend in (KTF, KTH):
                t = backend.arange(10, dtype=dtype)
                assert backend.dtype(t) == dtype
Project: nesgym    Author: codescv    | project source | file source
def get_learning_rate(self):
        optimizer = self.base_model.optimizer
        lr = K.eval(optimizer.lr * (1. / (1. + optimizer.decay * optimizer.iterations)))
        return lr
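This reproduces the time-based decay schedule that Keras' SGD applies internally, lr_t = lr / (1 + decay * iterations), and reads it out with K.eval. A hypothetical usage sketch with the Keras 2 API (model, data, and optimizer settings are illustrative; recent Keras versions store iterations as an integer variable, hence the cast):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras import backend as K

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer=SGD(lr=0.1, decay=1e-2), loss='mse')
model.fit(np.random.rand(32, 4), np.random.rand(32, 1), epochs=2, verbose=0)

opt = model.optimizer
# The decayed learning rate after training, as a plain float.
lr = K.eval(opt.lr * (1. / (1. + opt.decay *
                            K.cast(opt.iterations, K.dtype(opt.decay)))))
print(lr)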
Project: keras    Author: GeekLiB    | project source | file source
def test_loss_masking():
    weighted_loss = weighted_objective(objectives.get('mae'))
    shape = (3, 4, 2)
    X = np.arange(24).reshape(shape)
    Y = 2 * X

    # Normally the trailing 1 is added by standardize_weights
    weights = np.ones((3,))
    mask = np.ones((3, 4))
    mask[1, 0] = 0

    out = K.eval(weighted_loss(K.variable(X),
                               K.variable(Y),
                               K.variable(weights),
                               K.variable(mask)))
Project: keras    Author: GeekLiB    | project source | file source
def test_maxnorm():
    for m in test_values:
        norm_instance = constraints.maxnorm(m)
        normed = norm_instance(K.variable(example_array))
        assert(np.all(K.eval(normed) < m))

    # a more explicit example
    norm_instance = constraints.maxnorm(2.0)
    x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
    x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0],
                                [2.0, 0, 0],
                                [2. / np.sqrt(3), 2. / np.sqrt(3), 2. / np.sqrt(3)]]).T
    x_normed_actual = K.eval(norm_instance(K.variable(x)))
    assert_allclose(x_normed_actual, x_normed_target, rtol=1e-05)
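The test above checks maxnorm column-by-column. As a quick sketch of the same idea, using the Keras 1-era constraints.maxnorm from this test file (max_norm in Keras 2) with illustrative weights:

import numpy as np
from keras import backend as K
from keras import constraints

w = np.array([[0., 1., 3., 3.],
              [0., 0., 0., 3.],
              [0., 0., 0., 3.]])   # columns are weight vectors
clipped = K.eval(constraints.maxnorm(2.0)(K.variable(w)))
# Columns whose L2 norm exceeds 2 are rescaled down to norm 2;
# the rest are left (approximately) unchanged.
print(np.linalg.norm(clipped, axis=0))   # ~[0. 1. 2. 2.]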
Project: keras    Author: GeekLiB    | project source | file source
def test_nonneg():
    nonneg_instance = constraints.nonneg()
    normed = nonneg_instance(K.variable(example_array))
    assert(np.all(np.min(K.eval(normed), axis=1) == 0.))
Project: keras    Author: GeekLiB    | project source | file source
def test_unitnorm():
    unitnorm_instance = constraints.unitnorm()
    normalized = unitnorm_instance(K.variable(example_array))
    norm_of_normalized = np.sqrt(np.sum(K.eval(normalized)**2, axis=0))
    # in the unit norm constraint, it should be equal to 1.
    difference = norm_of_normalized - 1.
    largest_difference = np.max(np.abs(difference))
    assert(np.abs(largest_difference) < 10e-5)
Project: keras    Author: GeekLiB    | project source | file source
def test_objective_shapes_3d():
    y_a = K.variable(np.random.random((5, 6, 7)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    for obj in allobj:
        objective_output = obj(y_a, y_b)
        assert K.eval(objective_output).shape == (5, 6)
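The shape assertion holds because Keras objectives reduce only the trailing axis. A minimal sketch, assuming the Keras 1-era keras.objectives module used in this test file (keras.losses in Keras 2):

import numpy as np
from keras import backend as K
from keras import objectives

y_a = K.variable(np.random.random((5, 6, 7)))
y_b = K.variable(np.random.random((5, 6, 7)))
out = K.eval(objectives.mean_squared_error(y_a, y_b))
print(out.shape)   # (5, 6): only the last axis is reduced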
Project: keras    Author: GeekLiB    | project source | file source
def test_cce_one_hot():
    y_a = K.variable(np.random.randint(0, 7, (5, 6)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    objective_output = objectives.sparse_categorical_crossentropy(y_a, y_b)
    assert K.eval(objective_output).shape == (5, 6)

    y_a = K.variable(np.random.randint(0, 7, (6,)))
    y_b = K.variable(np.random.random((6, 7)))
    assert K.eval(objectives.sparse_categorical_crossentropy(y_a, y_b)).shape == (6,)
Project: keras    Author: GeekLiB    | project source | file source
def test_zero_padding_1d():
    nb_samples = 2
    input_dim = 2
    nb_steps = 5
    shape = (nb_samples, nb_steps, input_dim)
    input = np.ones(shape)

    # basic test
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': 2},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': (1, 2)},
               input_shape=input.shape)
    layer_test(convolutional.ZeroPadding1D,
               kwargs={'padding': {'left_pad': 1, 'right_pad': 2}},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding1D(padding=2)
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(np_output[:, offset, :], 0.)
    assert_allclose(np_output[:, 2:-2, :], 1.)

    layer = convolutional.ZeroPadding1D(padding=(1, 2))
    layer.build(shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for left_offset in [0]:
        assert_allclose(np_output[:, left_offset, :], 0.)
    for right_offset in [-1, -2]:
        assert_allclose(np_output[:, right_offset, :], 0.)
    assert_allclose(np_output[:, 1:-2, :], 1.)
    layer.get_config()
Project: keras    Author: GeekLiB    | project source | file source
def test_zero_padding_3d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3

    input = np.ones((nb_samples,
                     input_len_dim1, input_len_dim2, input_len_dim3,
                     stack_size))

    # basic test
    layer_test(convolutional.ZeroPadding3D,
               kwargs={'padding': (2, 2, 2)},
               input_shape=input.shape)

    # correctness test
    layer = convolutional.ZeroPadding3D(padding=(2, 2, 2))
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    for offset in [0, 1, -1, -2]:
        assert_allclose(np_output[:, offset, :, :, :], 0.)
        assert_allclose(np_output[:, :, offset, :, :], 0.)
        assert_allclose(np_output[:, :, :, offset, :], 0.)
    assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
    layer.get_config()
Project: keras    Author: GeekLiB    | project source | file source
def test_upsampling_2d():
    nb_samples = 2
    stack_size = 2
    input_nb_row = 11
    input_nb_col = 12

    for dim_ordering in ['th', 'tf']:
        if dim_ordering == 'th':
            input = np.random.rand(nb_samples, stack_size, input_nb_row,
                                   input_nb_col)
        else:  # tf
            input = np.random.rand(nb_samples, input_nb_row, input_nb_col,
                                   stack_size)

        for length_row in [2, 3, 9]:
            for length_col in [2, 3, 9]:
                layer = convolutional.UpSampling2D(
                    size=(length_row, length_col),
                    dim_ordering=dim_ordering)
                layer.build(input.shape)
                output = layer(K.variable(input))
                np_output = K.eval(output)
                if dim_ordering == 'th':
                    assert np_output.shape[2] == length_row * input_nb_row
                    assert np_output.shape[3] == length_col * input_nb_col
                else:  # tf
                    assert np_output.shape[1] == length_row * input_nb_row
                    assert np_output.shape[2] == length_col * input_nb_col

                # compare with numpy
                if dim_ordering == 'th':
                    expected_out = np.repeat(input, length_row, axis=2)
                    expected_out = np.repeat(expected_out, length_col, axis=3)
                else:  # tf
                    expected_out = np.repeat(input, length_row, axis=1)
                    expected_out = np.repeat(expected_out, length_col, axis=2)

                assert_allclose(np_output, expected_out)
Project: keras    Author: GeekLiB    | project source | file source
def test_cropping_3d():
    nb_samples = 2
    stack_size = 2
    input_len_dim1 = 8
    input_len_dim2 = 8
    input_len_dim3 = 8
    cropping = ((2, 2), (3, 3), (2, 3))
    dim_ordering = K.image_dim_ordering()

    if dim_ordering == 'th':
        input = np.random.rand(nb_samples, stack_size,
                               input_len_dim1, input_len_dim2, input_len_dim3)
    else:
        input = np.random.rand(nb_samples,
                               input_len_dim1, input_len_dim2,
                               input_len_dim3, stack_size)
    # basic test
    layer_test(convolutional.Cropping3D,
               kwargs={'cropping': cropping,
                       'dim_ordering': dim_ordering},
               input_shape=input.shape)
    # correctness test
    layer = convolutional.Cropping3D(cropping=cropping,
                                     dim_ordering=dim_ordering)
    layer.build(input.shape)
    output = layer(K.variable(input))
    np_output = K.eval(output)
    # compare with numpy
    if dim_ordering == 'th':
        expected_out = input[:,
                             :,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1],
                             cropping[2][0]: -cropping[2][1]]
    else:
        expected_out = input[:,
                             cropping[0][0]: -cropping[0][1],
                             cropping[1][0]: -cropping[1][1],
                             cropping[2][0]: -cropping[2][1],
                             :]
    assert_allclose(np_output, expected_out)
Project: keras    Author: GeekLiB    | project source | file source
def test_batchnorm_mode_0_convnet():
    model = Sequential()
    norm_m0 = normalization.BatchNormalization(mode=0, axis=1, input_shape=(3, 4, 4), momentum=0.8)
    model.add(norm_m0)
    model.compile(loss='mse', optimizer='sgd')

    # centered on 5.0, variance 10.0
    X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
    model.fit(X, X, nb_epoch=4, verbose=0)
    out = model.predict(X)
    out -= np.reshape(K.eval(norm_m0.beta), (1, 3, 1, 1))
    out /= np.reshape(K.eval(norm_m0.gamma), (1, 3, 1, 1))

    assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
    assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Project: keras    Author: GeekLiB    | project source | file source
def test_batchnorm_mode_1():
    norm_m1 = normalization.BatchNormalization(input_shape=(10,), mode=1)
    norm_m1.build(input_shape=(None, 10))

    for inp in [input_1, input_2, input_3]:
        out = (norm_m1.call(K.variable(inp)) - norm_m1.beta) / norm_m1.gamma
        assert_allclose(K.eval(K.mean(out)), 0.0, atol=1e-1)
        if inp.std() > 0.:
            assert_allclose(K.eval(K.std(out)), 1.0, atol=1e-1)
        else:
            assert_allclose(K.eval(K.std(out)), 0.0, atol=1e-1)
Project: keras    Author: GeekLiB    | project source | file source
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
Project: keras    Author: GeekLiB    | project source | file source
def test_matthews_correlation():
    y_true = K.variable(np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]))
    y_pred = K.variable(np.array([1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]))

    # Calculated using sklearn.metrics.matthews_corrcoef
    expected = -0.14907119849998601

    actual = K.eval(metrics.matthews_correlation(y_true, y_pred))
    epsilon = 1e-05
    assert expected - epsilon <= actual <= expected + epsilon
Project: keras    Author: GeekLiB    | project source | file source
def test_precision():
    y_true = K.variable(np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]))
    y_pred = K.variable(np.array([1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]))

    # Calculated using sklearn.metrics.precision_score
    expected = 0.40000000000000002

    actual = K.eval(metrics.precision(y_true, y_pred))
    epsilon = 1e-05
    assert expected - epsilon <= actual <= expected + epsilon
Project: keras    Author: GeekLiB    | project source | file source
def test_recall():
    y_true = K.variable(np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]))
    y_pred = K.variable(np.array([1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]))

    # Calculated using sklearn.metrics.recall_score
    expected = 0.2857142857142857

    actual = K.eval(metrics.recall(y_true, y_pred))
    epsilon = 1e-05
    assert expected - epsilon <= actual <= expected + epsilon
Project: keras    Author: GeekLiB    | project source | file source
def test_fbeta_score():
    y_true = K.variable(np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]))
    y_pred = K.variable(np.array([1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]))

    # Calculated using sklearn.metrics.fbeta_score
    expected = 0.30303030303030304

    actual = K.eval(metrics.fbeta_score(y_true, y_pred, beta=2))
    epsilon = 1e-05
    assert expected - epsilon <= actual <= expected + epsilon
Project: keras    Author: GeekLiB    | project source | file source
def test_fmeasure():
    y_true = K.variable(np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]))
    y_pred = K.variable(np.array([1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0]))

    # Calculated using sklearn.metrics.f1_score
    expected = 0.33333333333333331

    actual = K.eval(metrics.fmeasure(y_true, y_pred))
    epsilon = 1e-05
    assert expected - epsilon <= actual <= expected + epsilon
Project: keras    Author: GeekLiB    | project source | file source
def test_top_k_categorical_accuracy():
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    success_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                            k=3))
    assert success_result == 1
    partial_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                            k=2))
    assert partial_result == 0.5
    failure_result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred,
                            k=1))
    assert failure_result == 0
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_objective_shapes_3d():
    y_a = K.variable(np.random.random((5, 6, 7)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    for obj in allobj:
        objective_output = obj(y_a, y_b)
        assert K.eval(objective_output).shape == (5, 6)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_objective_shapes_2d():
    y_a = K.variable(np.random.random((6, 7)))
    y_b = K.variable(np.random.random((6, 7)))
    for obj in allobj:
        objective_output = obj(y_a, y_b)
        assert K.eval(objective_output).shape == (6,)