Python keras.backend module: gradients() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.gradients().
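All of the examples below follow the same basic pattern: build a scalar loss tensor, ask the backend for its symbolic gradients, and wrap both in a K.function so they can be evaluated on concrete data. A minimal, self-contained sketch of that pattern (the toy model and shapes here are illustrative assumptions, not taken from any of the projects below):

import numpy as np
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model

# Toy model: a single dense layer (illustrative only).
inp = Input(shape=(4,))
out = Dense(1)(inp)
model = Model(inp, out)

# Scalar loss, its gradient w.r.t. the model input, and a callable that evaluates both.
loss = K.sum(K.square(model.output))
grads = K.gradients(loss, model.input)[0]
grad_fn = K.function([model.input], [loss, grads])

loss_value, grad_values = grad_fn([np.random.rand(2, 4).astype('float32')])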

Project: gandlf    Author: codekansas    | Project source | File source
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""

    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title)
Project: keras    Author: GeekLiB    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
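The Evaluator class that this comment refers to is not part of the extracted snippet. For reference, a typical version (as used in the Keras neural style transfer example) caches the result of eval_loss_and_grads so that scipy.optimize.fmin_l_bfgs_b can query the loss and the gradients through two separate callables:

class Evaluator(object):

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        # Compute loss and gradients in one pass, cache the gradients.
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        # Return the cached gradients and reset the cache for the next step.
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values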
Project: keras    Author: GeekLiB    | Project source | File source
def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras-contrib    Author: farizrahman4u    | Project source | File source
def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):
    """Calculates the gradient penalty loss for a batch of "averaged" samples.

    In Improved WGANs, the 1-Lipschitz constraint is enforced by adding a term to the loss function
    that penalizes the network if the gradient norm moves away from 1. However, it is impossible to evaluate
    this function at all points in the input space. The compromise used in the paper is to choose random points
    on the lines between real and generated samples, and check the gradients at these points. Note that it is the
    gradient w.r.t. the input averaged samples, not the weights of the discriminator, that we're penalizing!

    In order to evaluate the gradients, we must first run samples through the generator and evaluate the loss.
    Then we get the gradients of the discriminator w.r.t. the input averaged samples.
    The l2 norm and penalty can then be calculated for this gradient.

    Note that this loss function requires the original averaged samples as input, but Keras only supports passing
    y_true and y_pred to loss functions. To get around this, we make a partial() of the function with the
    averaged_samples argument, and use that for model training."""
    gradients = K.gradients(K.sum(y_pred), averaged_samples)
    gradient_l2_norm = K.sqrt(K.sum(K.square(gradients)))
    gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)
    return gradient_penalty
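A hedged sketch of the partial() wiring described in the docstring above; discriminator_model, wasserstein_loss, the averaged_samples tensor, and the weight value are placeholders standing in for whatever the surrounding training script defines:

from functools import partial

# Bind the extra arguments so the result has the (y_true, y_pred) signature Keras expects.
partial_gp_loss = partial(gradient_penalty_loss,
                          averaged_samples=averaged_samples,
                          gradient_penalty_weight=10)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras loss functions need a __name__

discriminator_model.compile(optimizer='adam',
                            loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])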
Project: facejack    Author: PetarV-    | Project source | File source
def __init__(self, mdl, x):
        self.loss_value = None
        self.grad_values = None
        self.mdl = mdl

        loss = K.variable(0.)
        layer_dict = dict([(layer.name, layer) for layer in mdl.layers])

        inp = layer_dict['face'].output
        out = layer_dict['conf'].output

        loss -= K.sum(out)
        # Might want to add some L2-loss in here, depending on output
        # loss += 0.0005 * K.sum(K.square(inp - x))
        grads = K.gradients(loss, inp)

        outputs = [loss]
        if type(grads) in {list, tuple}:
            outputs += grads
        else:
            outputs.append(grads)

        self.f_outputs = K.function([inp, K.learning_phase()], outputs)
Project: pCVR    Author: xjtushilei    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: dogsVScats    Author: prajwalkr    | Project source | File source
def visualize(model, layer_name):
    print('Model loaded.')
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    for filter_index in sample(range(0, layer_dict[layer_name].nb_filter),10):
        layer_output = layer_dict[layer_name].output
        loss = K.mean(layer_output[:, filter_index, :, :])
        grads = K.gradients(loss, model.layers[0].input)[0]
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
        iterate = K.function([model.layers[0].input, K.learning_phase()], [loss, grads])

        input_img_data = np.asarray([read_image('visimage.jpg')])

        for _ in range(100):
            loss_value, grads_value = iterate([input_img_data, 0])
            input_img_data += grads_value * 3

        img = deprocess_image(input_img_data[0])
        write_image(img, '../activations/out{}.jpg'.format(filter_index))
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_gradcam(image,model,layer_name,mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    #gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss,layer.output)[0]
    feature_weights = K.mean(upstream_grads,axis=[1,2]) #spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image,0])[0]
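A short, hypothetical usage sketch for the helper above; model, chest_xray, and the layer name are placeholders for whatever network and input are being inspected:

heatmap = get_gradcam(chest_xray, model, 'conv2d_7', mode='abnormal')
# heatmap has the spatial shape of the chosen conv layer; upsample it to the
# input resolution before overlaying it on the image.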
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x, mask=None):
        # x should be an output and a target
        assert len(x) == 2

        losses = _per_sample_loss(self.loss, mask, x)
        if self.fast:
            grads = K.sqrt(sum([
                K.sum(K.square(g), axis=1)
                for g in K.gradients(losses, self.parameter_list)
            ]))
        else:
            nb_samples = K.shape(losses)[0]
            grads = K.map_fn(
                lambda i: self._grad_norm(losses[i]),
                K.arange(0, nb_samples),
                dtype=K.floatx()
            )

        return K.reshape(grads, (-1, 1))
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x, mask=None):
        # x should be an output and a target
        assert len(x) == 2

        losses = _per_sample_loss(self.loss, mask, x)
        if self.fast:
            grads = K.sqrt(sum([
                K.sum(K.square(g), axis=1)
                for g in K.gradients(losses, self.parameter_list)
            ]))
        else:
            nb_samples = K.shape(losses)[0]
            grads = K.map_fn(
                lambda i: self._grad_norm(losses[i]),
                K.arange(0, nb_samples),
                dtype=K.floatx()
            )

        return K.reshape(grads, (-1, 1))
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x, mask=None):
        # x should be an output and a target
        assert len(x) == 2

        losses = _per_sample_loss(self.loss, mask, x)
        if self.fast:
            grads = K.sqrt(sum([
                K.sum(K.square(g), axis=1)
                for g in K.gradients(losses, self.parameter_list)
            ]))
        else:
            nb_samples = K.shape(losses)[0]
            grads = K.map_fn(
                lambda i: self._grad_norm(losses[i]),
                K.arange(0, nb_samples),
                dtype=K.floatx()
            )

        return K.reshape(grads, (-1, 1))
Project: importance-sampling    Author: idiap    | Project source | File source
def call(self, x, mask=None):
        # x should be an output and a target
        assert len(x) == 2

        losses = _per_sample_loss(self.loss, mask, x)
        if self.fast:
            grads = K.sqrt(sum([
                K.sum(K.square(g), axis=1)
                for g in K.gradients(losses, self.parameter_list)
            ]))
        else:
            nb_samples = K.shape(losses)[0]
            grads = K.map_fn(
                lambda i: self._grad_norm(losses[i]),
                K.arange(0, nb_samples),
                dtype=K.floatx()
            )

        return K.reshape(grads, (-1, 1))
Project: yupgi_alert0    Author: forcecore    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras-customized    Author: ambrite    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras-customized    Author: ambrite    | Project source | File source
def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def iterate_softmax(model, neuron):
    input_tensor = model.input

    # this is a placeholder tensor that will contain our generated images

    # build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    print('X shape', model.output[:, neuron])
    x = model.output

    loss_weight_continuity = 0.0
    loss_weight_activity = 1.0

    loss = K.mean(x)
    #loss += loss_weight_continuity * total_variation_norm(input_tensor)

    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_tensor)[0]
    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # this function returns the loss and grads given the input picture
    return K.function([input_tensor], [loss, grads])
Project: PyDeepStyle    Author: bennycheung    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: rldurak    Author: janEbert    | Project source | File source
def __init__(
            self, sess, state_shape, action_shape, load=True, optimizer='adam',
            alpha=0.001, epsilon=1e-8, tau=0.001, neurons_per_layer=[100, 50]):
        """Initialize a critic with the given session, learning rate,
        update factor and neurons in the hidden layers.

        If load is true, load the model instead of creating a new one.
        """
        self.sess = sess
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.optimizer_choice = optimizer.lower()
        self.alpha = alpha
        self.tau = tau
        if len(neurons_per_layer) < 2:
            if not neurons_per_layer:
                self.neurons_per_layer = [100, 50]
            else:
                self.neurons_per_layer = neurons_per_layer + [50]
            print('Neurons per layer for the critic have been adjusted')
        else:
            self.neurons_per_layer = neurons_per_layer
        K.set_session(sess)
        self.model, self.state_input, self.action_input = self.create_model(
                epsilon)
        self.target_model = self.create_model(epsilon)[0]
        self.action_gradients = K.gradients(self.model.output,
                self.action_input)
        self.sess.run(tf.global_variables_initializer())
        if load:
            self.load_weights()
        self.model._make_predict_function()
        self.target_model._make_predict_function()
Project: rl    Author: wingedsheep    | Project source | File source
def get_gradients(self, model):
        """Return the gradient of every trainable weight in model

        Parameters
        -----------
        model : a keras model instance

        First, find all tensors which are trainable in the model. Surprisingly,
        `model.trainable_weights` will return tensors for which
        trainable=False has been set on their layer (last time I checked), hence the extra check.
        Next, get the gradients of the loss with respect to the weights.

        """
        weights = [tensor for tensor in model.trainable_weights if model.get_layer(tensor.name[:-2]).trainable]
        optimizer = model.optimizer

        return optimizer.get_gradients(model.total_loss, weights)
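The gradient tensors returned above still have to be evaluated against concrete data. A sketch of one way to do that (the feed layout of inputs, targets and sample weights follows the usual Keras 2 convention for a compiled single-input, single-output model; gradient_tracker, x_batch, and y_batch are placeholders):

grad_tensors = gradient_tracker.get_gradients(model)   # hypothetical instance of the class above
# model must already be compiled so that model.targets and model.sample_weights exist.
feeds = model.inputs + model.targets + model.sample_weights + [K.learning_phase()]
get_grads = K.function(feeds, grad_tensors)
grad_values = get_grads([x_batch, y_batch, np.ones(len(x_batch)), 0])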
Project: keras-hands-on    Author: danielvarga    | Project source | File source
def eval_loss_and_grads(x):
    x = x.reshape((1, 3, img_width, img_height))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras    Author: NVIDIA    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras    Author: NVIDIA    | Project source | File source
def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: DeepIV    Author: jhartford    | Project source | File source
def get_gradients(self, loss, params):
    '''
    Replacement for the default keras get_gradients() function.
    Modification: checks if the object has the attribute grads and 
    returns that rather than calculating the gradients using automatic
    differentiation. 
    '''
    if hasattr(self, 'grads'):
        grads = self.grads
    else:
        grads = K.gradients(loss, params)
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
        norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
        grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
        grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
    return grads
Project: deeplearning-implementations    Author: xxlatgh    | Project source | File source
def style_recreate():
    '''
    returns an image of recreated style
    '''
    input_shape = style_arr.shape[1:]
    model = VGG16_Avg(include_top=False, input_shape=input_shape)
    outputs = {l.name: l.output for l in model.layers}
    layers = [outputs['block{}_conv1'.format(o)] for o in range(1,4)]
    layers_model = Model(model.input, layers)
    targs = [K.variable(o) for o in layers_model.predict(style_arr)]
    loss = sum(style_loss(l1[0], l2[0]) for l1,l2 in zip(layers, targs))
    grads = K.gradients(loss, model.input)

    function_input = [model.input]
    function_output = ([loss]+grads)
    style_fn = K.function(function_input, function_output)
    evaluator = Evaluator(style_fn, style_arr.shape)
    style_iterations=10
    x = rand_img(style_arr.shape)  # full batch shape, mirroring content_recreate below
    x, style_loss_history = solve_image(evaluator, style_iterations, x, style_result_path)
    s_path = style_result_path + '/res_at_iteration_9.png'
    return s_path
Project: deeplearning-implementations    Author: xxlatgh    | Project source | File source
def content_recreate():
    '''
    returns an image of recreated content
    '''
    model = VGG16_Avg(include_top=False)
    layer = model.get_layer('block5_conv1').output
    layer_model = Model(model.input, layer)
    targ = K.variable(layer_model.predict(img_arr))

    loss = metrics.mse(layer, targ)
    grads = K.gradients(loss, model.input)

    function_input = [model.input]
    function_output = ([loss]+grads)
    fn = K.function(function_input, function_output)
    evaluator = Evaluator(fn, img_arr.shape)

    x = rand_img(img_arr.shape)
    content_iterations=10
    x_final, content_loss_history = solve_image(evaluator, content_iterations, x, path = content_result_path)
    c_path = content_result_path + '/res_at_iteration_9.png'
    return c_path
Project: GWS    Author: lijialinneu    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values


# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: unblackboxing_webinar    Author: deepsense-ai    | Project source | File source
def _get_output_functions(self):
        # if you name your layers you can use model.get_layer('recurrent_layer')
        model = self.tweet_classifier
        recurrent_layer = model.layers[2]
        attention_layer = model.layers[5]
        merged_layer = model.layers[9]
        output_layer = model.layers[10]
        layers = [recurrent_layer, attention_layer, merged_layer, output_layer]   

        outputs = []        
        for l in layers:
            outputs.append(l.output)

            loss = K.mean(model.output)
            grads = K.gradients(loss, l.output)[0]
            grads_norm = grads / (K.sqrt(K.mean(K.square(grads))) + 1e-5)
            outputs.append(grads_norm)

        all_function = K.function([model.layers[0].input, K.learning_phase()],
                                  outputs)
        return all_function
Project: dlcv05    Author: telecombcn-dl    | Project source | File source
def eval_loss_and_grads(x):
    x = x.reshape((1, 3, img_width, img_height))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: nonlinearIB    Author: artemyk    | Project source | File source
def on_train_begin(self, logs={}):
        N    = self.mi_calculator.miN
        dims = self.mi_calculator.data.shape[1]
        Kdists = K.placeholder(ndim=2)
        Klogvar = K.placeholder(ndim=0)

        lossfunc = K.function([Kdists, Klogvar,], [kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))])
        jacfunc  = K.function([Kdists, Klogvar,], K.gradients(kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar)), Klogvar))

        def obj(logvar, dists):
            return lossfunc([dists, logvar.flat[0]])[0]
        def jac(logvar, dists):
            return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0] 

        self.obj = obj
        self.jac = jac
Project: keras-101    Author: burness    | Project source | File source
def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Project: keras    Author: GeekLiB    | Project source | File source
def getwhere(x):
    ''' Calculate the "where" mask that contains switches indicating which
    index contained the max value when MaxPool2D was applied.  Using the
    gradient of the sum is a nice trick to keep everything high level.'''
    y_prepool, y_postpool = x
    return K.gradients(K.sum(y_postpool), y_prepool)

# input image dimensions
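A minimal sketch of how getwhere is typically wired up in the what-where autoencoder this example builds: wrap it in a Lambda layer fed with the pre- and post-pooling tensors. The shapes are illustrative, and the original example runs on the Theano backend, where K.gradients returns a tensor rather than a list:

from keras.layers import Input, MaxPooling2D, Lambda

img = Input(shape=(1, 28, 28))                        # channels_first, illustrative shape
pooled = MaxPooling2D((2, 2))(img)
# The "where" mask has the pre-pooling shape and marks the argmax positions.
where = Lambda(getwhere, output_shape=lambda shapes: shapes[0])([img, pooled])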
Project: pCVR    Author: xjtushilei    | Project source | File source
def getwhere(x):
    ''' Calculate the 'where' mask that contains switches indicating which
    index contained the max value when MaxPool2D was applied.  Using the
    gradient of the sum is a nice trick to keep everything high level.'''
    y_prepool, y_postpool = x
    return K.gradients(K.sum(y_postpool), y_prepool)
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_saliency(image, model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    K.set_learning_phase(True)
    return saliency
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_gradcam(image, model, layer_name):
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0]
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss,model.input)[0])
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    K.set_learning_phase(True)
    return saliency
Project: importance-sampling    Author: idiap    | Project source | File source
def _grad_norm(self, loss):
        grads = K.gradients(loss, self.parameter_list)
        return K.sqrt(
            sum([
                K.sum(K.square(g))
                for g in grads
            ])
        )
Project: importance-sampling    Author: idiap    | Project source | File source
def _grad_norm(self, loss):
        grads = K.gradients(loss, self.parameter_list)
        return K.sqrt(
            sum([
                K.sum(K.square(g))
                for g in grads
            ])
        )
Project: importance-sampling    Author: idiap    | Project source | File source
def _grad_norm(self, loss):
        grads = K.gradients(loss, self.parameter_list)
        return K.sqrt(
            sum([
                K.sum(K.square(g))
                for g in grads
            ])
        )
Project: RNNIPTag    Author: ml-slac    | Project source | File source
def EvaluateJacobian(model):
    #theano.function( [model.layers[0].input], T.jacobian(model.layers[-1].output.flatten(), model.layers[0].input) )


    X = K.placeholder(shape=(15,15)) #specify the right placeholder
    Y = K.sum(K.square(X)) # loss function
    fn = K.function([X], K.gradients(Y, [X]))  # callable that evaluates the gradient
    return fn
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def build(self, a_image, ap_image, b_image, output_shape):
        self.output_shape = output_shape
        loss = self.build_loss(a_image, ap_image, b_image)
        # get the gradients of the generated image wrt the loss
        grads = K.gradients(loss, self.net_input)
        outputs = [loss]
        if type(grads) in {list, tuple}:
            outputs += grads
        else:
            outputs.append(grads)
        self.f_outputs = K.function([self.net_input], outputs)
Project: Neural-Style-Transfer-Windows    Author: titu1994    | Project source | File source
def build(self, a_image, ap_image, b_image, output_shape):
        self.output_shape = output_shape
        loss = self.build_loss(a_image, ap_image, b_image)
        # get the gradients of the generated image wrt the loss
        grads = K.gradients(loss, self.net_input)
        outputs = [loss]
        if type(grads) in {list, tuple}:
            outputs += grads
        else:
            outputs.append(grads)
        f_inputs = [self.net_input]
        for nnf in self.feature_nnfs:
            f_inputs.append(nnf.placeholder)
        self.f_outputs = K.function(f_inputs, outputs)
Project: keras-customized    Author: ambrite    | Project source | File source
def getwhere(x):
    ''' Calculate the "where" mask that contains switches indicating which
    index contained the max value when MaxPool2D was applied.  Using the
    gradient of the sum is a nice trick to keep everything high level.'''
    y_prepool, y_postpool = x
    return K.gradients(K.sum(y_postpool), y_prepool)
Project: deform-conv    Author: felixlaumon    | Project source | File source
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        total_loss = self.model.total_loss
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    # dense_1/bias:0 > dense_1/bias_0
                    name = weight.name.replace(':', '_')
                    tf.summary.histogram(name, weight)
                    tf.summary.histogram(
                        '{}_gradients'.format(name),
                        K.gradients(total_loss, [weight])[0]
                    )
                    if self.write_images:
                        w_img = tf.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)
                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)
                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
                        tf.summary.image(name, w_img)

                if hasattr(layer, 'output'):
                    tf.summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf.summary.merge_all()

        if self.write_graph:
            self.writer = tf.summary.FileWriter(self.log_dir,
                                                self.sess.graph)
        else:
            self.writer = tf.summary.FileWriter(self.log_dir)
Project: deform-conv    Author: felixlaumon    | Project source | File source
def test_tf_batch_map_offsets_grad():
    np.random.seed(42)
    input = np.random.random((4, 100, 100))
    offsets = np.random.random((4, 100, 100, 2)) * 2

    input = K.variable(input)
    offsets = K.variable(offsets)

    tf_mapped_vals = tf_batch_map_offsets(input, offsets)
    grad = K.gradients(tf_mapped_vals, input)[0]
    grad = K.eval(grad)
    assert not np.allclose(grad, 0)
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def iterate_fxn(model, layer_dict, layer_name='conv5_1'):
    input_tensor = model.input

    # this is a placeholder tensor that will contain our generated images

    # build a loss function that maximizes the activation
    # of the layer given by layer name
    x = layer_dict[layer_name].output
    shape = layer_dict[layer_name].output_shape

    loss = K.variable(0.)
    # we avoid border artifacts by only involving non-border pixels in the loss
    loss_weight_activity = 1.0
    loss_weight_continuity = 1.0
    loss_weight_l2 = 1.0

    if K.image_data_format()== 'channels_first':
        #loss -= loss_weight_activity*K.sum(K.square(x[:, :, 2: shape[2] - 2, 2: shape[3] - 2])) / np.prod(shape[1:])
        loss += loss_weight_activity*K.sum(K.square(x)) / np.prod(shape[1:])
    else:
        #loss -= loss_weight_activity*K.sum(K.square(x[:, 2: shape[1] - 2, 2: shape[2] - 2, :])) / np.prod(shape[1:])
        loss += loss_weight_activity*K.sum(K.square(x)) / np.prod(shape[1:])

    # add continuity loss (gives image local coherence, can result in an artful blur)
    loss += loss_weight_continuity * total_variation_norm(input_tensor) / np.prod(shape[1:])
    # add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
    loss += loss_weight_l2 * (K.sum(K.square(input_tensor)) / np.prod(shape[1:]))
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_tensor)[0]

    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # this function returns the loss and grads given the input picture
    iterate = K.function([input_tensor, K.learning_phase()], [loss, grads])
    return iterate
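A hedged usage sketch for the iterate function returned above: a few steps of gradient ascent from a noise image. The channels_last image shape and the step size are assumptions:

iterate = iterate_fxn(model, layer_dict, layer_name='conv5_1')
input_img = np.random.random((1, 224, 224, 3)) * 20 + 128.0   # noise image, channels_last
for _ in range(20):
    loss_value, grads_value = iterate([input_img, 0])          # 0 selects the test phase
    input_img += grads_value                                   # ascend the activation loss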
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def iterate_neuron(model, layer_dict, neuron, layer_name='conv5_1'):
    input_tensor = model.input

    # this is a placeholder tensor that will contain our generated images

    # build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    print('X shape', layer_dict[layer_name].output_shape)
    #fc1_in =  layer_dict['fc1'].input
    x = layer_dict[layer_name].output[:,neuron,:,:]

    loss = K.variable(0.)
    loss_weight_activity = 1.0
    loss_weight_continuity = 1.0
    loss_weight_l2 = 0.0

    if K.image_data_format()== 'channels_first':
        # we avoid border artifacts by only involving non-border pixels in the loss
        #loss -= loss_weight_activity*K.sum(K.square(x[:, :, 2: shape[2] - 2, 2: shape[3] - 2])) / np.prod(shape[1:])
        loss += loss_weight_activity*K.sum(K.square(x)) 
    else:
        #loss -= loss_weight_activity*K.sum(K.square(x[:, 2: shape[1] - 2, 2: shape[2] - 2, :])) / np.prod(shape[1:])
        loss += loss_weight_activity*K.sum(K.square(x))

    # add continuity loss (gives image local coherence, can result in an artful blur)
    #loss += loss_weight_continuity * total_variation_norm(input_tensor)
    # add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
    #loss -= loss_weight_l2 * (K.sum(K.square(input_tensor)))
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_tensor)[0]

    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # this function returns the loss and grads given the input picture
    iterate = K.function([input_tensor], [loss, grads])
    return iterate
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def grad_towards_input(model, desired_input, layer_dict, layer_name='conv5_1'):
    input_tensor = model.input

    # this is a placeholder tensor that will contain our generated images

    # build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    x = layer_dict[layer_name].output
    shape = layer_dict[layer_name].output_shape

    loss = K.variable(0.)
    # we avoid border artifacts by only involving non-border pixels in the loss
    loss_weight_activity = 1.0
    loss_weight_continuity = 0.0
    loss_weight_l2 = 0.0

    loss -= loss_weight_activity*K.sum(K.square(desired_input-input_tensor)) / np.prod(shape[1:])

    # add continuity loss (gives image local coherence, can result in an artful blur)
    loss += loss_weight_continuity * total_variation_norm(input_tensor) / np.prod(shape[1:])
    # add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
    loss += loss_weight_l2 * (K.sum(K.square(input_tensor)) / np.prod(shape[1:]))
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_tensor)[0]

    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # this function returns the loss and grads given the input picture
    iterate = K.function([input_tensor, K.learning_phase()], [loss, grads])
    return iterate
Project: dsde-deep-learning    Author: broadinstitute    | Project source | File source
def net_grad_towards_input(model, desired_input, layer_dict, layer_name='conv5_1'):
    input_tensor = model.input
    shape = layer_dict[layer_name].output_shape  # needed for the np.prod(shape[1:]) terms below

    # this is a placeholder tensor that will contain our generated images

    # build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    loss = K.variable(0.)
    # we avoid border artifacts by only involving non-border pixels in the loss
    loss_weight_activity = 1.0
    loss_weight_continuity = 0.0
    loss_weight_l2 = 0.0

    loss -= loss_weight_activity*K.sum(K.square(desired_input-input_tensor)) / np.prod(shape[1:])

    # add continuity loss (gives image local coherence, can result in an artful blur)
    loss += loss_weight_continuity * total_variation_norm(input_tensor) / np.prod(shape[1:])
    # add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
    loss += loss_weight_l2 * (K.sum(K.square(input_tensor)) / np.prod(shape[1:]))
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, model.trainable_weights)[0]

    print('grads shape is:', K.int_shape(grads))
    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)

    # this function returns the loss and grads given the input picture
    iterate = K.function([input_tensor], [loss, grads])
    return iterate


# util function to convert a tensor into a valid image