Python keras.backend module: placeholder() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.placeholder().
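
Before the project examples, here is a minimal sketch of the pattern they all share, assuming only the public keras.backend API on a TensorFlow or Theano backend: declare a symbolic placeholder, build an expression on it, compile the expression into a callable with K.function, and feed it concrete NumPy arrays.

import numpy as np
from keras import backend as K

# Declare a symbolic 2D input; only the rank (ndim) is fixed, not the shape.
x = K.placeholder(ndim=2)

# Build a symbolic expression on top of the placeholder.
y = K.square(x)

# Compile a callable that maps concrete inputs to concrete outputs.
f = K.function([x], [y])

print(f([np.array([[1.0, 2.0], [3.0, 4.0]])])[0])  # -> [[ 1.  4.] [ 9. 16.]]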

Project: keras    Author: GeekLiB    | Project source | File source
def test_softmax():
    '''
    Test using a reference implementation of softmax
    '''
    def softmax(values):
        m = np.max(values)
        e = np.exp(values - m)
        return e / np.sum(e)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softmax(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: gandlf    Author: codekansas    | Project source | File source
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""

    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate  # update_rate: step-size constant defined elsewhere in the source file

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title)
Project: keras    Author: GeekLiB    | Project source | File source
def test_sigmoid():
    '''
    Test using a numerically stable reference sigmoid implementation
    '''
    def ref_sigmoid(x):
        if x >= 0:
            return 1 / (1 + np.exp(-x))
        else:
            z = np.exp(x)
            return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.sigmoid(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: LIE    Author: EmbraceLife    | Project source | File source
def keras_wrap(model, target, output, loss):
    """ Convenience function for wrapping a Keras loss function.
    """
    # pylint: disable=import-error
    import keras.objectives as O
    import keras.backend as K
    # pylint: enable=import-error
    if isinstance(loss, str):
        loss = O.get(loss)
    shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
    ins = [
        (target, K.placeholder(
            ndim=len(shape),
            dtype=K.dtype(model.outputs[target].value),
            name=target
        ))
    ]
    out = loss(ins[0][1], output)
    return ins, out

###############################################################################
Project: keras-customized    Author: ambrite    | Project source | File source
def test_softmax():
    '''
    Test using a reference implementation of softmax
    '''
    def softmax(values):
        m = np.max(values)
        e = np.exp(values - m)
        return e / np.sum(e)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softmax(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_sigmoid():
    '''
    Test using a numerically stable reference sigmoid implementation
    '''
    def ref_sigmoid(x):
        if x >= 0:
            return 1 / (1 + np.exp(-x))
        else:
            z = np.exp(x)
            return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.sigmoid(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: conv_qsar_fast    Author: connorcoley    | Project source | File source
def __init__(self, output_dim, inner_dim, depth = 2, init_output='uniform', 
            activation_output='softmax', init_inner='identity',
            activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
        if depth < 1:
            quit('Cannot use GraphFP with depth zero')
        self.init_output = initializations.get(init_output)
        self.activation_output = activations.get(activation_output)
        self.init_inner = initializations.get(init_inner)
        self.activation_inner = activations.get(activation_inner)
        self.output_dim = output_dim
        self.inner_dim = inner_dim
        self.depth = depth
        self.scale_output = scale_output
        self.padding = padding

        self.initial_weights = None
        self.input_dim = 4 # each entry is a 3D N_atom x N_atom x N_feature tensor
        if self.input_dim:
            kwargs['input_shape'] = (None, None, None,) # 3D tensor for each input
        #self.input = K.placeholder(ndim = 4)
        super(GraphFP, self).__init__(**kwargs)
Project: conv_qsar_fast    Author: connorcoley    | Project source | File source
def __init__(self, output_dim, inner_dim, depth = 2, init_output='uniform', 
            activation_output='softmax', init_inner='identity',
            activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
        if depth < 1:
            quit('Cannot use GraphFP with depth zero')
        self.init_output = initializations.get(init_output)
        self.activation_output = activations.get(activation_output)
        self.init_inner = initializations.get(init_inner)
        self.activation_inner = activations.get(activation_inner)
        self.output_dim = output_dim
        self.inner_dim = inner_dim
        self.depth = depth
        self.scale_output = scale_output
        self.padding = padding

        self.initial_weights = None
        self.input_dim = 4 # each entry is a 3D N_atom x N_atom x N_feature tensor
        if self.input_dim:
            kwargs['input_shape'] = (None, None, None,) # 3D tensor for each input
        #self.input = K.placeholder(ndim = 4)
        super(GraphFP, self).__init__(**kwargs)
Project: NN_sentiment    Author: hx364    | Project source | File source
def build(self):
        self.input = K.placeholder(shape=(self.input_shape[0], self.input_length),
                                   dtype='int32')
        self.W = self.init((self.input_dim, self.output_dim))
        self.trainable_weights = [self.W]
        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
Project: Deconvnet-keras    Author: Jallet    | Project source | File source
def __init__(self, layer, linear = False):
        '''
        # Arguments
            layer: an instance of an Activation layer, whose configuration
                   will be used to initialize DActivation(input_shape,
                   output_shape, weights)
        '''
        self.layer = layer
        self.linear = linear
        self.activation = layer.activation
        input = K.placeholder(shape = layer.output_shape)

        output = self.activation(input)
        # According to the original paper, the forward and backward
        # passes apply the same activation (ReLU).
        self.up_func = K.function(
                [input, K.learning_phase()], output)
        self.down_func = K.function(
                [input, K.learning_phase()], output)

    # Compute activation in forward pass
Project: keras    Author: NVIDIA    | Project source | File source
def test_softmax():
    '''
    Test using a reference implementation of softmax
    '''
    def softmax(values):
        m = np.max(values)
        e = np.exp(values - m)
        return e / np.sum(e)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softmax(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras    Author: NVIDIA    | Project source | File source
def test_sigmoid():
    '''
    Test using a numerically stable reference sigmoid implementation
    '''
    def ref_sigmoid(x):
        if x >= 0:
            return 1 / (1 + np.exp(-x))
        else:
            z = np.exp(x)
            return z / (1 + z)
    sigmoid = np.vectorize(ref_sigmoid)

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.sigmoid(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: neural_style    Author: metaflow-ai    | Project source | File source
def test_grams_loss(self):
        input = np.zeros((1, 3, 4, 4))
        iter = 0
        for i in range(input.shape[1]):
            for j in range(input.shape[2]):
                for k in range(input.shape[3]):
                    input[0][i][j][k] = iter
                    iter += 1
        input = input.astype(K.floatx())


        x = K.placeholder(input.shape, name='x')
        gram_mat = grams(x)
        loss = frobenius_error(gram_mat, np.ones((1, 3, 3)))
        get_loss = K.function([x], [loss])

        error = get_loss([input])[0]
        true_error = 60344.299382716

        self.assertEqual(np.round(error.item(0)), np.round(true_error))
Project: kur    Author: deepgram    | Project source | File source
def keras_wrap(model, target, output, loss):
    """ Convenience function for wrapping a Keras loss function.
    """
    # pylint: disable=import-error
    import keras.objectives as O
    import keras.backend as K
    # pylint: enable=import-error
    if isinstance(loss, str):
        loss = O.get(loss)
    shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
    ins = [
        (target, K.placeholder(
            ndim=len(shape),
            dtype=K.dtype(model.outputs[target].value),
            name=target
        ))
    ]
    out = loss(ins[0][1], output)
    return ins, out

###############################################################################
Project: NeuralSentenceOrdering    Author: FudanNLP    | Project source | File source
def __init__(self, input_dim, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid'):
        #self.input_dim = input_dim
        self.output_dim = int(output_dim / 2)
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        self.input_dim = input_dim
        #self.input = K.placeholder(input_shape)

        # initial states: 2 all-zero tensors of shape (output_dim)
        self.forward_lstm = LSTM(input_dim = input_dim, output_dim = self.output_dim)
        self.backward_lstm = LSTM(input_dim = input_dim, output_dim = self.output_dim)

        self.params = self.forward_lstm.params + self.backward_lstm.params

        #if self.initial_weights is not None:
        #    self.set_weights(self.initial_weights)
        #    del self.initial_weights
Project: keras_bn_library    Author: bnsnapper    | Project source | File source
def __init__(self, X_train, X_test, loss, verbose=1, batch_size = 1,
             label='loss', every_n_epochs=1, display_delta=True):
        super(UnsupervisedLoss2Logger, self).__init__()
        self.X_train = X_train
        self.X_test = X_test
        self.loss = loss
        self.verbose = verbose
        self.label = label
        self.every_n_epochs = every_n_epochs
        self.display_delta = display_delta
        self.prev_loss = None
        self.batch_size = batch_size

        input_train = K.placeholder(shape=self.X_train.shape)
        input_test = K.placeholder(shape=self.X_test.shape)
        loss = self.loss(input_train, input_test)
        ins = [input_train, input_test]
        self.loss_function = K.function(ins, loss)
Project: nonlinearIB    Author: artemyk    | Project source | File source
def on_train_begin(self, logs={}):
        N    = self.mi_calculator.miN
        dims = self.mi_calculator.data.shape[1]
        Kdists = K.placeholder(ndim=2)
        Klogvar = K.placeholder(ndim=0)

        lossfunc = K.function([Kdists, Klogvar,], [kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))])
        jacfunc  = K.function([Kdists, Klogvar,], K.gradients(kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar)), Klogvar))

        def obj(logvar, dists):
            return lossfunc([dists, logvar.flat[0]])[0]
        def jac(logvar, dists):
            return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0] 

        self.obj = obj
        self.jac = jac
Project: keras    Author: GeekLiB    | Project source | File source
def test_time_distributed_softmax():
    x = K.placeholder(shape=(1, 1, 5))
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()
    test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
    f([test_values])[0]
Project: keras    Author: GeekLiB    | Project source | File source
def test_softplus():
    '''
    Test using a reference softplus implementation
    '''
    def softplus(x):
        return np.log(np.ones_like(x) + np.exp(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softplus(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softplus(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras    Author: GeekLiB    | Project source | File source
def test_softsign():
    '''
    Test using a reference softsign implementation
    '''
    def softsign(x):
        return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softsign(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softsign(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras    Author: GeekLiB    | Project source | File source
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)
Project: keras    Author: GeekLiB    | Project source | File source
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
Project: keras    Author: GeekLiB    | Project source | File source
def test_tanh():
    test_values = get_standard_values()

    x = K.placeholder(ndim=2)
    exp = activations.tanh(x)
    f = K.function([x], [exp])

    result = f([test_values])[0]
    expected = np.tanh(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: triplets-extraction    Author: zsctju    | Project source | File source
def build(self):
        input_shape = self.input_shape
        dim = self.hidden_dim
        self.input_dim = dim
        self.output_dim = dim
        self.input = K.placeholder(input_shape)
        hdim = self.hidden_dim

        if self.stateful or self.state_input or len(self.state_outputs) > 0:
            self.reset_states()
        else:
            # initial states: 2 all-zero tensors of shape (hidden_dim)
            self.states = [None, None]

        self.W_i = self.init((dim, hdim))
        self.U_i = self.inner_init((hdim, hdim))
        self.b_i = K.zeros((hdim))

        self.W_f = self.init((dim, hdim))
        self.U_f = self.inner_init((hdim, hdim))
        self.b_f = self.forget_bias_init((hdim))

        self.W_c = self.init((dim, hdim))
        self.U_c = self.inner_init((hdim, hdim))
        self.b_c = K.zeros((hdim))

        self.W_o = self.init((dim, hdim))
        self.U_o = self.inner_init((hdim, hdim))
        self.b_o = K.zeros((hdim))

        self.W_x = self.init((hdim, dim))
        self.b_x = K.zeros((dim))

        self.trainable_weights = [
            self.W_i, self.U_i, self.b_i,
            self.W_c, self.U_c, self.b_c,
            self.W_f, self.U_f, self.b_f,
            self.W_o, self.U_o, self.b_o,
            self.W_x, self.b_x
        ]
Project: sciDT    Author: edvisees    | Project source | File source
def __init__(self, output_dim,
                 init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 input_dim=None, input_length1=None, input_length2=None, **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        self.input_length1 = input_length1
        self.input_length2 = input_length2
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
        self.input = K.placeholder(ndim=4)
        super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
Project: RNNIPTag    Author: ml-slac    | Project source | File source
def __init__(self, input_shape, **kwargs):
        #super(TimeDistributedPassThrough, self).__init__()
        #self.input_shape = input_shape
        #self.output_shape = input_shape
        self.input = K.placeholder(ndim=3)
        kwargs['input_shape'] = input_shape
        super(TimeDistributedPassThrough, self).__init__(**kwargs)
Project: RNNIPTag    Author: ml-slac    | Project source | File source
def EvaluateJacobian(model):
    #theano.function( [model.layers[0].input], T.jacobian(model.layers[-1].output.flatten(), model.layers[0].input) )


    X = K.placeholder(shape=(15,15)) #specify the right placeholder
    Y = K.sum(K.square(X)) # loss function
    fn = K.function([X], K.gradients(Y, [X])) #function to call the gradient
Project: LIE    Author: EmbraceLife    | Project source | File source
def get_loss(self, model, target, output):
        """ Returns the loss tensor for this output.

            # Arguments

            model: Model instance.
            target: str. The name of the output layer to apply the loss
                function to.
            output: tensor (implementation-specific). The symbolic tensor for this
                output layer.

            # Return value

            A tuple of the form:

            ```python
            (
                # Input tensors
                [
                    (input_name, placeholder),
                    (input_name, placeholder),
                    ...
                ],

                # Output value
                loss_value
            )
            ```

            The derived class is required to return all required input
            placeholders, including placeholders for the target model outputs.
    """
    raise NotImplementedError
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def build(self, a_image, ap_image, b_image, output_shape):
        self.output_shape = output_shape
        loss = self.build_loss(a_image, ap_image, b_image)
        # get the gradients of the generated image wrt the loss
        grads = K.gradients(loss, self.net_input)
        outputs = [loss]
        if type(grads) in {list, tuple}:
            outputs += grads
        else:
            outputs.append(grads)
        f_inputs = [self.net_input]
        for nnf in self.feature_nnfs:
            f_inputs.append(nnf.placeholder)
        self.f_outputs = K.function(f_inputs, outputs)
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def __init__(self, matcher, f_layer):
        self.matcher = matcher
        mis = matcher.input_shape
        self.placeholder = K.placeholder(mis[::-1])
        self.f_layer = f_layer
Project: keras-customized    Author: ambrite    | Project source | File source
def test_time_distributed_softmax():
    x = K.placeholder(shape=(1, 1, 5))
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()
    test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
    f([test_values])[0]
Project: keras-customized    Author: ambrite    | Project source | File source
def test_softplus():
    '''
    Test using a reference softplus implementation
    '''
    def softplus(x):
        return np.log(np.ones_like(x) + np.exp(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softplus(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softplus(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_softsign():
    '''
    Test using a reference softsign implementation
    '''
    def softsign(x):
        return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softsign(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softsign(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_tanh():
    test_values = get_standard_values()

    x = K.placeholder(ndim=2)
    exp = activations.tanh(x)
    f = K.function([x], [exp])

    result = f([test_values])[0]
    expected = np.tanh(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: BioNLP-2016    Author: cambridgeltl    | Project source | File source
def build(self):
        self.input = K.placeholder(shape=(self.input_shape[0],
                                          self.input_length),
                                   dtype='int32')
        self.W = K.variable(self.initial_weights[0])
        self.trainable_weights = []
        self.regularizers = []
Project: image-analogies    Author: awentzonline    | Project source | File source
def __init__(self, matcher, f_layer):
        self.matcher = matcher
        mis = matcher.input_shape
        self.placeholder = K.placeholder(mis[::-1])
        self.f_layer = f_layer
Project: keras    Author: NVIDIA    | Project source | File source
def test_time_distributed_softmax():
    x = K.placeholder(shape=(1, 1, 5))
    f = K.function([x], [activations.softmax(x)])
    test_values = get_standard_values()
    test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
    f([test_values])[0]
Project: keras    Author: NVIDIA    | Project source | File source
def test_softplus():
    '''
    Test using a reference softplus implementation
    '''
    def softplus(x):
        return np.log(np.ones_like(x) + np.exp(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softplus(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softplus(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras    Author: NVIDIA    | Project source | File source
def test_softsign():
    '''
    Test using a reference softsign implementation
    '''
    def softsign(x):
        return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softsign(x)])
    test_values = get_standard_values()

    result = f([test_values])[0]
    expected = softsign(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: keras    Author: NVIDIA    | Project source | File source
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.relu(x)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)
Project: keras    Author: NVIDIA    | Project source | File source
def test_elu():
    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.elu(x, 0.5)])

    test_values = get_standard_values()
    result = f([test_values])[0]

    # because no negatives in test values
    assert_allclose(result, test_values, rtol=1e-05)

    negative_values = np.array([[-1, -2]], dtype=K.floatx())
    result = f([negative_values])[0]
    true_result = (np.exp(negative_values) - 1) / 2

    assert_allclose(result, true_result)
Project: keras    Author: NVIDIA    | Project source | File source
def test_tanh():
    test_values = get_standard_values()

    x = K.placeholder(ndim=2)
    exp = activations.tanh(x)
    f = K.function([x], [exp])

    result = f([test_values])[0]
    expected = np.tanh(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Project: neural_style    Author: metaflow-ai    | Project source | File source
def test_total_variation_error(self):
        # Prepare input
        input = np.zeros((1, 3, 4, 4))
        iter = 0
        for i in range(input.shape[1]):
            for j in range(input.shape[2]):
                for k in range(input.shape[3]):
                    input[0][i][j][k] = iter
                    iter += 1
        input = input.astype('float32')

        x = K.placeholder(input.shape, name='x')
        loss = total_variation_error(x, 2)
        grad = K.gradients(loss, x)
        get_grads = K.function([x], grad)

        # GradInput result for beta = 2
        true_grad = np.array([[
            [
                [-5, -4, -4,  1],
                [-1, 0, 0, 1],
                [-1, 0, 0, 1],
                [4, 4, 4, 0]
            ],
            [
                [-5, -4, -4,  1],
                [-1, 0, 0, 1],
                [-1, 0, 0, 1],
                [4, 4, 4, 0]
            ],
            [
                [-5, -4, -4,  1],
                [-1, 0, 0, 1],
                [-1, 0, 0, 1],
                [4, 4, 4, 0]
            ],
        ]]).astype(K.floatx())

        self.assertEqual(True, (get_grads([input])==true_grad).all())
Project: neural_style    Author: metaflow-ai    | Project source | File source
def test_grams_th(self):
        previous_image_dim_ordering = K.image_dim_ordering()
        K.set_image_dim_ordering('th')

        input = np.zeros((1, 3, 4, 4))
        iter = 0
        for i in range(input.shape[1]):
            for j in range(input.shape[2]):
                for k in range(input.shape[3]):
                    input[0][i][j][k] = iter
                    iter += 1
        input = input.astype(K.floatx())

        true_grams = np.array([[
                    [1240,  3160,  5080],
                    [3160,  9176, 15192],
                    [5080, 15192, 25304]
                ]]).astype(K.floatx())
        true_grams /= input.shape[1] * input.shape[2] * input.shape[3]

        x = K.placeholder(input.shape, name='x')
        gram_mat = grams(x)
        get_grams = K.function([x], [gram_mat])        
        K.set_image_dim_ordering(previous_image_dim_ordering)

        pred_grams = get_grams([input])[0]
        self.assertEqual(True, (pred_grams==true_grams).all())
Project: neural_style    Author: metaflow-ai    | Project source | File source
def test_grams_tf(self):
        previous_image_dim_ordering = K.image_dim_ordering()
        K.set_image_dim_ordering('tf')

        input = np.zeros((1, 3, 4, 4))
        iter = 0
        for i in range(input.shape[1]):
            for j in range(input.shape[2]):
                for k in range(input.shape[3]):
                    input[0][i][j][k] = iter
                    iter += 1
        input = input.astype(K.floatx())
        input = np.transpose(input, (0, 2, 3, 1))

        true_grams = np.array([[
                    [1240,  3160,  5080],
                    [3160,  9176, 15192],
                    [5080, 15192, 25304]
                ]]).astype(K.floatx())
        true_grams /= input.shape[1] * input.shape[2] * input.shape[3]

        x = K.placeholder(input.shape, name='x')
        gram_mat = grams(x)
        get_grams = K.function([x], [gram_mat])
        K.set_image_dim_ordering(previous_image_dim_ordering)   

        pred_grams = get_grams([input])[0]
        self.assertEqual(True, (pred_grams==true_grams).all())
Project: kur    Author: deepgram    | Project source | File source
def get_loss(self, model, target, output):
        """ Returns the loss tensor for this output.

            # Arguments

            model: Model instance.
            target: str. The name of the output layer to apply the loss
                function to.
            output: tensor (implementation-specific). The symbolic tensor for this
                output layer.

            # Return value

            A tuple of the form:

            ```python
            (
                # Input tensors
                [
                    (input_name, placeholder),
                    (input_name, placeholder),
                    ...
                ],

                # Output value
                loss_value
            )
            ```

            The derived class is required to return all required input
            placeholders, including placeholders for the target model outputs.
    """
    raise NotImplementedError
Project: ensemble-adv-training    Author: ftramer    | Project source | File source
def main(model_name, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    data_gen = data_gen_mnist(X_train)

    x = K.placeholder((None,
                       FLAGS.IMAGE_ROWS,
                       FLAGS.IMAGE_COLS,
                       FLAGS.NUM_CHANNELS
                       ))

    y = K.placeholder(shape=(None, FLAGS.NUM_CLASSES))

    model = model_mnist(type=model_type)

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name+'.json', 'w') as f:
        f.write(json_string)
Project: triplets-extraction    Author: zsctju    | Project source | File source
def build(self):
        input_shape = self.input_shape
        dim = input_shape[-1]
        self.input_dim = dim

        self.input = K.placeholder(input_shape)
        if not self.hidden_dim:
            self.hidden_dim = dim
        hdim = self.hidden_dim
        self.output_dim = dim
        outdim = self.output_dim
        if self.stateful or self.state_input or len(self.state_outputs) > 0:
            self.reset_states()
        else:
            # initial states: 2 all-zero tensors of shape (hidden_dim)
            self.states = [None, None]

        self.W_i = self.init((dim, hdim))
        self.U_i = self.inner_init((hdim, hdim))
        self.b_i = K.zeros((hdim))

        self.W_f = self.init((dim, hdim))
        self.U_f = self.inner_init((hdim, hdim))
        self.b_f = self.forget_bias_init((hdim))

        self.W_c = self.init((dim, hdim))
        self.U_c = self.inner_init((hdim, hdim))
        self.b_c = K.zeros((hdim))

        self.W_o = self.init((dim, hdim))
        self.U_o = self.inner_init((hdim, hdim))
        self.b_o = K.zeros((hdim))

        self.W_x = self.init((hdim, outdim))
        self.b_x = K.zeros((dim))

        self.trainable_weights = [
            self.W_i, self.U_i, self.b_i,
            self.W_c, self.U_c, self.b_c,
            self.W_f, self.U_f, self.b_f,
            self.W_o, self.U_o, self.b_o,
            self.W_x, self.b_x
        ]