Python tensorflow.python.framework.ops module: RegisterGradient() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.ops.RegisterGradient().
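All of the project snippets below follow the same pattern: define a Python gradient function, register it under a fresh op-type name with ops.RegisterGradient, and point an existing op type at that name through the graph's gradient_override_map. Here is a minimal, self-contained sketch of that pattern under the TensorFlow 1.x graph API; the name "ScaleByTwoGrad" and the doubling rule are made up for illustration:

import tensorflow as tf
from tensorflow.python.framework import ops

# Register a gradient function under a new, unique op-type name.
@ops.RegisterGradient("ScaleByTwoGrad")  # hypothetical name for this sketch
def _scale_by_two_grad(op, grad):
    # Pass the incoming gradient through, doubled.
    return grad * 2.0

x = tf.constant([1.0, 2.0, 3.0])
g = tf.get_default_graph()
# Identity ops created inside this scope use the custom gradient.
with g.gradient_override_map({"Identity": "ScaleByTwoGrad"}):
    y = tf.identity(x)

dy_dx = tf.gradients(y, x)[0]
with tf.Session() as sess:
    print(sess.run(dy_dx))  # expected: [2. 2. 2.]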

Project: DMNN    Author: magnux    | Project source | File source
def tf_mod(x, y, name=None):
    """Differentiable mod based in numpy
    Args
        x: first argument
        y: second argument
    Returns
        mod between x and y
    """

    def np_mod(x, y):
        return np.mod(x, y, dtype=np.float32)

    def modgrad(op, grad):
        x = op.inputs[0]  # the first argument (usually needed to compute the gradient, e.g. the gradient of x^2 is 2*x)
        y = op.inputs[1]  # the second argument

        return grad * 1, grad * 0  # the propagated gradients with respect to the first and second arguments, respectively

    def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
        # Need to generate a unique name to avoid duplicates:
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

        tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
        g = tf.get_default_graph()
        with g.gradient_override_map({"PyFunc": rnd_name}):
            return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

    with ops.name_scope(name, "mod", [x,y]) as name:
        z = py_func(np_mod,
                    [x,y],
                    [tf.float32],
                    name=name,
                    grad=modgrad)  # <-- here's the call to the gradient
        return tf.reshape(z[0], tf.shape(x))
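A quick way to confirm that the override is picked up is to differentiate the result. A minimal usage sketch, assuming the tf_mod definition above is in scope together with its numpy/tensorflow imports (the input values are made up):

a = tf.constant([5.0, 7.0, 9.0])
b = tf.constant([3.0, 3.0, 4.0])
z = tf_mod(a, b)                    # forward pass runs np_mod through tf.py_func
da, db = tf.gradients(z, [a, b])    # backward pass uses modgrad via the registered override
with tf.Session() as sess:
    print(sess.run([z, da, db]))    # expected: the mod values, ones for da, zeros for db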
Project: third_person_im    Author: bstadie    | Project source | File source
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
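The __call__ above is a method: it relies on self.num_calls so that every invocation registers its gradient under a unique name. A hedged sketch of the enclosing wrapper and its use; the class name FlipGradientBuilder and the placeholder shape are assumptions, and tf.negative is used here as the TensorFlow 1.0 name for the older tf.neg in the snippet:

import tensorflow as tf
from tensorflow.python.framework import ops

class FlipGradientBuilder(object):  # assumed name; only __call__ appears in the source
    def __init__(self):
        self.num_calls = 0

    def __call__(self, x, l=1.0):
        # A fresh name per call lets one graph hold several reversal layers.
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y

flip_gradient = FlipGradientBuilder()
features = tf.placeholder(tf.float32, [None, 128])
reversed_features = flip_gradient(features, l=0.5)  # identity forward, -0.5 * grad backward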
Project: tf_cnnvis    Author: InFoCusp    | Project source | File source
def _register_custom_gradients():
    """
    Register Custom Gradients.
    """
    global is_Registered

    if not is_Registered:
        # register LRN gradients
        @ops.RegisterGradient("Customlrn")
        def _CustomlrnGrad(op, grad):
            return grad

        # register Relu gradients
        @ops.RegisterGradient("GuidedRelu")
        def _GuidedReluGrad(op, grad):
            return tf.where(0. < grad, gen_nn_ops._relu_grad(grad, op.outputs[0]), tf.zeros_like(grad))

        is_Registered = True


# save given graph object as meta file
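Registering "GuidedRelu" by itself changes nothing; a graph has to map its Relu ops onto that name. A hedged sketch of how the override might be applied when (re)building a model; the tiny conv layer stands in for the real network and is not tf_cnnvis code:

import tensorflow as tf

_register_custom_gradients()  # defined above; registers "GuidedRelu" once

g = tf.get_default_graph()
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
# Relu ops created inside this scope backpropagate with the guided rule.
with g.gradient_override_map({"Relu": "GuidedRelu"}):
    h = tf.nn.relu(tf.layers.conv2d(x, 32, 3))
saliency = tf.gradients(h, x)[0]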
Project: SSPP-DAN    Author: csehong    | Project source | File source
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
Project: tefla    Author: openAGI    | Project source | File source
def __call__(self, x, gamma=1.0):
        grad_name = "GradientReverse%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _gradients_reverse(op, grad):
            return [tf.neg(grad) * gamma]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
Project: tefla    Author: openAGI    | Project source | File source
def __call__(self, x, gamma=1.0):
        grad_name = "GradientReverse%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _gradients_reverse(op, grad):
            return [tf.negative(grad) * gamma]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
Project: woipv    Author: Panaetius    | Project source | File source
def _GuidedEluGrad(op, grad):
    return tf.select(0. < grad, gen_nn_ops._elu_grad(grad, op.outputs[0]), tf.zeros(tf.shape(grad)))

# @ops.RegisterGradient("MaxPoolWithArgmax")
# def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
#     return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0],
#                                                  grad,
#                                                  op.outputs[1],
#                                                  op.get_attr("ksize"),
#                                                  op.get_attr("strides"),
#                                                  padding=op.get_attr("padding"))
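Only the gradient function body is shown above; presumably it is registered and routed to ELU ops elsewhere in the project. A hedged sketch of that wiring, where the op-type name "GuidedElu" is an assumption and tf.where stands in for the pre-1.0 tf.select used in the snippet:

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops

@ops.RegisterGradient("GuidedElu")  # assumed registration name
def _GuidedEluGrad(op, grad):
    # Propagate the ELU gradient only where the incoming gradient is positive.
    return tf.where(0. < grad,
                    gen_nn_ops._elu_grad(grad, op.outputs[0]),
                    tf.zeros(tf.shape(grad)))

g = tf.get_default_graph()
x = tf.placeholder(tf.float32, [None, 64])
with g.gradient_override_map({"Elu": "GuidedElu"}):
    y = tf.nn.elu(x)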
Project: keras-grad-cam    Author: jacobgil    | Project source | File source
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, grad):
            dtype = op.inputs[0].dtype
            return grad * tf.cast(grad > 0., dtype) * \
                tf.cast(op.inputs[0] > 0., dtype)
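Guided backpropagation only takes effect for Relu ops created while the override map is active, so the model has to be (re)built inside that scope. A hedged usage sketch; the toy model below is a stand-in, not keras-grad-cam's actual graph:

import tensorflow as tf

register_gradient()  # defined above; adds "GuidedBackProp" once

g = tf.get_default_graph()
with g.gradient_override_map({"Relu": "GuidedBackProp"}):
    x = tf.placeholder(tf.float32, [None, 224, 224, 3])
    hidden = tf.layers.dense(tf.layers.flatten(x), 10, activation=tf.nn.relu)
saliency = tf.gradients(tf.reduce_max(hidden, axis=1), x)[0]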
Project: thinstack-rl    Author: hans    | Project source | File source
def _thin_stack_lookup_gradient(op, grad_stack1, grad_stack2, grad_buf_top, _):
    stack, buffer, _, _, buffer_cursors, transitions = op.inputs

    stack2_ptrs = op.outputs[3]
    t = op.get_attr("timestep")

    batch_size = buffer_cursors.get_shape().as_list()[0]
    num_tokens = buffer.get_shape().as_list()[0] / batch_size
    batch_range = math_ops.range(batch_size)
    batch_range_i = tf.to_float(batch_range)

    grad_stack_name = "grad_stack_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_buffer_name = "grad_buffer_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_stack = gen_state_ops._temporary_variable(stack.get_shape().as_list(), tf.float32, grad_stack_name)
    grad_buffer = gen_state_ops._temporary_variable(buffer.get_shape().as_list(), tf.float32, grad_buffer_name)
    grad_stack = tf.assign(grad_stack, tf.zeros_like(grad_stack))
    grad_buffer = tf.assign(grad_buffer, tf.zeros_like(grad_buffer))

    updates = []

    # Write grad_stack1 into block (t - 1)
    if t >= 1:
      in_cursors = (t - 1) * batch_size + batch_range
      grad_stack = tf.scatter_add(grad_stack, in_cursors, grad_stack1)

    # Write grad_stack2 using stored lookup pointers
    grad_stack = floaty_scatter_add(grad_stack, stack2_ptrs * batch_size + batch_range_i, grad_stack2)

    # Use buffer_cursors to scatter grads into buffer.
    buffer_ptrs = tf.minimum(float(num_tokens * batch_size) - 1.0,
                             buffer_cursors * batch_size + batch_range_i)
    grad_buffer = floaty_scatter_add(grad_buffer, buffer_ptrs, grad_buf_top)

    with tf.control_dependencies([grad_stack, grad_buffer]):
      grad_stack = gen_state_ops._destroy_temporary_variable(grad_stack, grad_stack_name)
      grad_buffer = gen_state_ops._destroy_temporary_variable(grad_buffer, grad_buffer_name)

      with tf.control_dependencies([grad_stack, grad_buffer]):
        return grad_stack, grad_buffer, None, None, None, None

# Deprecated custom gradient op.
#@ops.RegisterGradient("ThinStackLookup")