Python tensorflow module: neg() code examples

The following 8 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.neg().
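All of the examples below target the pre-1.0 TensorFlow API: tf.neg() computes the element-wise negation y = -x and was renamed tf.negative() in TensorFlow 1.0. A minimal usage sketch, assuming a pre-1.0 installation:

import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
y = tf.neg(x)             # element-wise negation; tf.negative(x) in TF >= 1.0

with tf.Session() as sess:
    print(sess.run(y))    # [-1.  2. -3.]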

Project: cartpoleplusplus | Author: matpalm
def init_ops_for_training(self, critic):
    # The actor's gradients are the gradients of its output w.r.t. its vars, using initial
    # gradients provided by the critic. This requires that the critic was init'd with
    # input_action = actor.output_action (which is natural anyway).
    # We wrap the optimiser in a namespace since we don't want it included in the copy to
    # the target networks.
    # Note that we negate the gradients from the critic since we are trying to maximise
    # the q values (not minimise them like a loss).
    with tf.variable_scope("optimiser"):
      gradients = tf.gradients(self.output_action,
                               self.trainable_model_vars(),
                               tf.neg(critic.q_gradients_wrt_actions()))
      gradients = zip(gradients, self.trainable_model_vars())
      # potentially clip and wrap with debugging
      gradients = util.clip_and_debug_gradients(gradients, opts)
      # apply
      optimiser = tf.train.GradientDescentOptimizer(opts.actor_learning_rate)
      self.train_op = optimiser.apply_gradients(gradients)
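The comments above describe the DDPG-style actor update: tf.gradients receives the negated critic gradients as its grad_ys argument, so that a plain descent optimiser ends up ascending the Q value. A self-contained toy sketch of that pattern (the variables and numbers below are illustrative, not from the cartpoleplusplus project):

import tensorflow as tf

theta = tf.Variable([1.0, 2.0])               # stand-in "actor" parameters
action = 3.0 * theta                          # actor output
q = -tf.reduce_sum(tf.square(action - 5.0))   # stand-in "critic" value

# dQ/d(action), negated so that gradient *descent* on theta increases Q
dq_daction = tf.gradients(q, action)[0]
actor_grads = tf.gradients(action, [theta], grad_ys=tf.neg(dq_daction))

train_op = tf.train.GradientDescentOptimizer(0.01).apply_gradients(
    list(zip(actor_grads, [theta])))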
Project: tf_base | Author: ozansener
def loss_function(self):
        pos_diff = self.anchor - self.positive
        neg_diff = self.anchor - self.negative

        pos_dist = tf.reduce_sum(tf.mul(pos_diff, pos_diff), 1)
        neg_dist = tf.reduce_sum(tf.mul(neg_diff, neg_diff), 1)

        triplet = tf.add(self.ALPHA, tf.add(pos_dist, tf.neg(neg_dist)))
        return tf.reduce_sum(tf.nn.relu(triplet))
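The loss above is a standard triplet margin loss, sum(relu(alpha + ||a - p||^2 - ||a - n||^2)), with tf.neg supplying the subtraction of the negative-pair distance. A hedged equivalent for TensorFlow 1.x written with the renamed ops (tf.multiply for tf.mul, tf.negative for tf.neg); the placeholders stand in for the class attributes, and the embedding size 128 is only illustrative:

import tensorflow as tf

anchor = tf.placeholder(tf.float32, [None, 128])
positive = tf.placeholder(tf.float32, [None, 128])
negative = tf.placeholder(tf.float32, [None, 128])
alpha = 0.2  # margin; the snippet above reads it from self.ALPHA

pos_dist = tf.reduce_sum(tf.multiply(anchor - positive, anchor - positive), 1)
neg_dist = tf.reduce_sum(tf.multiply(anchor - negative, anchor - negative), 1)
loss = tf.reduce_sum(tf.nn.relu(alpha + pos_dist + tf.negative(neg_dist)))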
Project: third_person_im | Author: bstadie
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
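This __call__ implements a gradient reversal ("flip gradient") layer: the forward pass is an identity, while the registered gradient multiplies the incoming gradient by -l, which is what the tf.neg call contributes. In TensorFlow 1.7 and later the same layer can be written without gradient_override_map via tf.custom_gradient; a hedged sketch:

import tensorflow as tf

def flip_gradient(x, l=1.0):
    # Identity in the forward pass, -l * grad in the backward pass.
    @tf.custom_gradient
    def _flip(x):
        def grad(dy):
            return tf.negative(dy) * l
        return tf.identity(x), grad
    return _flip(x)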
Project: SSPP-DAN | Author: csehong
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
Project: tefla | Author: openAGI
def __call__(self, x, gamma=1.0):
        grad_name = "GradientReverse%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _gradients_reverse(op, grad):
            return [tf.neg(grad) * gamma]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y
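All three projects above register the override from inside __call__ of a small builder object that only tracks self.num_calls, so each call gets a uniquely named gradient. A hedged completion of that pattern, adding the __init__ the excerpts omit and a typical use in front of a domain classifier (the class name and placeholder shapes are illustrative):

import tensorflow as tf
from tensorflow.python.framework import ops

class FlipGradientBuilder(object):
    def __init__(self):
        self.num_calls = 0    # gives every registered gradient a unique name

    def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y

flip_gradient = FlipGradientBuilder()
features = tf.placeholder(tf.float32, [None, 128])
reversed_features = flip_gradient(features, l=0.5)   # identity forward, -0.5 * grad backward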
Project: lsdc | Author: febert
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('log', None, tf.log, core.log),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
        ('lgamma', None, tf.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
Project: dizzy_layer | Author: Pastromhaug
def buildRotations(n, rand_or_identity,num_rots=None):
    print("num_rots: %d" %num_rots)
    num_rots = num_rots or (n-1)
    n_prime = int(n*(n-1)//2*num_rots/(n-1))
    outputs = []

    with vs.variable_scope("Build_Rotations"):

        (indices, values_idxs) = rotationPreprocess(n, num_rots)
        if rand_or_identity:
            print("Initialization: Random")
            thetas = vs.get_variable(initializer=tf.random_uniform([n_prime, 1], 0, 2*math.pi),
                    name="Thetas_RandInit", dtype=tf.float32)
        else:
            print("Initialization: Identity")
            thetas = vs.get_variable(initializer=tf.zeros([n_prime, 1]),
                    name="Thetas_OnesInit", dtype=tf.float32)
        cos = tf.cos(thetas)
        sin = tf.sin(thetas)
        nsin = tf.neg(sin)

        thetas_concat = tf.concat(0, [cos,sin,nsin])

        gathered_values = tf.squeeze(tf.gather(thetas_concat, values_idxs))
        shape = tf.constant([n, n], dtype=tf.int64)

        splt_values = tf.split(0, num_rots, gathered_values)
        splt_indices = tf.split(0, num_rots, indices)

        shape = tf.constant([n,n], dtype=tf.int64)
        for i in range(num_rots):
            curr_indices = splt_indices[i]
            curr_values = splt_values[i]
            sparse_rot = tf.SparseTensor(indices=curr_indices, values=curr_values, shape=shape)
            outputs.append(sparse_rot)
    print("buildRotations output length: %d" % len(outputs))
    return outputs
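buildRotations parameterises a stack of sparse n x n rotation matrices: for each angle theta, the gathered values cos(theta), sin(theta) and -sin(theta) (the tf.neg call) are the non-trivial entries of a 2 x 2 Givens rotation embedded in the larger matrix, with the exact index layout coming from rotationPreprocess, which is not shown here. A dense NumPy illustration of one such embedded rotation, with the plane (p, q) and the sign placement chosen only for illustration:

import numpy as np

def givens(n, p, q, theta):
    # n x n identity with a 2x2 rotation acting on coordinates p and q.
    R = np.eye(n)
    R[p, p] = np.cos(theta)
    R[q, q] = np.cos(theta)
    R[p, q] = -np.sin(theta)
    R[q, p] = np.sin(theta)
    return R

print(givens(4, 0, 2, np.pi / 6))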
Project: dizzy_layer | Author: Pastromhaug
def rotationTransform(X, n, scope, num_rots=None):
    num_rots = num_rots or (n-1)
    n_prime = int(n*(n-1)//2*num_rots/(n-1))
    outputs = []

    with vs.variable_scope(scope or "RotationTransform"):

        for i, (name, x) in enumerate(X):
            (indices, values_idxs) = rotationPreprocess(n, num_rots)
            thetas = vs.get_variable(initializer=tf.random_uniform([n_prime, 1], 0, 2*math.pi),
                    name="Thetas"+str(i)+name, dtype=tf.float32)

            cos = tf.cos(thetas)
            sin = tf.sin(thetas)
            nsin = tf.neg(sin)

            thetas_concat = tf.concat(0, [cos,sin,nsin])

            gathered_values = tf.squeeze(tf.gather(thetas_concat, values_idxs))
            shape = tf.constant([n, n], dtype=tf.int64)

            splt_values = tf.split(0, num_rots, gathered_values)
            splt_indices = tf.split(0, num_rots, indices)

            shape = tf.constant([n,n], dtype=tf.int64)
            for i in range(num_rots):
                curr_indices = splt_indices[i]
                curr_values = splt_values[i]
                sparse_rot = tf.SparseTensor(indices=curr_indices, values=curr_values, shape=shape)
                x = tf.sparse_tensor_dense_matmul(sparse_rot, x)
            outputs.append(x)
    return outputs
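rotationTransform applies the chain of rotations to each input by repeated sparse-dense matmuls, so each x must have n rows. A self-contained sketch of a single such application using the same pre-1.0 API as the snippet (tf.SparseTensor's shape argument became dense_shape in TensorFlow 1.0); the indices below are illustrative, since the real layout comes from rotationPreprocess:

import math
import tensorflow as tf

n, batch = 4, 3
theta = math.pi / 6.0
indices = [[0, 0], [0, 1], [1, 0], [1, 1], [2, 2], [3, 3]]
values = [math.cos(theta), -math.sin(theta),
          math.sin(theta),  math.cos(theta), 1.0, 1.0]
rot = tf.SparseTensor(indices=indices, values=values, shape=[n, n])
x = tf.placeholder(tf.float32, [n, batch])
y = tf.sparse_tensor_dense_matmul(rot, x)   # rotates the first two coordinates of each column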