Python tensorflow module: subtract() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.subtract().
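
Before the project samples, here is a minimal self-contained sketch (assuming TensorFlow 1.x, which all of the snippets below target) showing what tf.subtract computes: an element-wise difference with NumPy-style broadcasting.

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([0.5, 1.0])   # broadcast across rows

diff = tf.subtract(a, b)      # equivalent to a - b

with tf.Session() as sess:
    print(sess.run(diff))     # [[0.5 1. ] [2.5 3. ]]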

Project: facerecognition    Author: guoxiaolu    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: positive-to-negative triplet distance margin.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
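
A hypothetical usage sketch for the triplet_loss above (TensorFlow 1.x assumed; the batch size of 32 and embedding size of 128 are illustrative only):

import tensorflow as tf

anchor = tf.random_normal([32, 128])
positive = tf.random_normal([32, 128])
negative = tf.random_normal([32, 128])
loss = triplet_loss(anchor, positive, negative, alpha=0.2)

with tf.Session() as sess:
    print(sess.run(loss))  # non-negative scalar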
Project: convolutional-pose-machines-tensorflow    Author: timctho    | project source | file source
def rotate_points(orig_points, angle, w, h):
    """Return rotated points

    Args:
        orig_points: 'Tensor' with shape [N,2], each entry is a point (x, y)
        angle: rotation angle in radians
        w, h: width and height used to normalize the point coordinates

    Returns:
        'Tensor' with shape [N,2], with rotated points
    """

    # rotation
    rotate_mat = tf.stack([[tf.cos(angle) / w, tf.sin(angle) / h],
                           [-tf.sin(angle) / w, tf.cos(angle) / h]])

    # shift coord
    orig_points = tf.subtract(orig_points, 0.5)

    orig_points = tf.stack([orig_points[:, 0] * w,
                            orig_points[:, 1] * h], axis=1)
    rotated_points = tf.matmul(orig_points, rotate_mat) + 0.5

    return rotated_points
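
A hypothetical usage sketch (TensorFlow 1.x assumed): the points are expected in normalized [0, 1] coordinates, and w/h give the image size so the rotation stays isotropic in pixel space.

import numpy as np
import tensorflow as tf

points = tf.constant([[0.25, 0.25], [0.75, 0.5]], dtype=tf.float32)
rotated = rotate_points(points, angle=np.pi / 2, w=200, h=100)

with tf.Session() as sess:
    print(sess.run(rotated))  # [N, 2] rotated points, still normalized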
Project: tf_classification    Author: visipedia    | project source | file source
def create_training_batch(serialized_example, cfg, add_summaries):

    features = get_region_data(serialized_example, cfg, fetch_ids=False,
                               fetch_labels=True, fetch_text_labels=False)

    original_image = features['image']
    bboxes = features['bboxes']
    labels = features['labels']

    distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)

    distorted_inputs = tf.subtract(distorted_inputs, 0.5)
    distorted_inputs = tf.multiply(distorted_inputs, 2.0)

    names = ('inputs', 'labels')
    tensors = [distorted_inputs, labels]
    return [names, tensors]
Project: tf_classification    Author: visipedia    | project source | file source
def create_classification_batch(serialized_example, cfg, add_summaries):

    features = get_region_data(serialized_example, cfg, fetch_ids=True,
                               fetch_labels=False, fetch_text_labels=False)

    original_image = features['image']
    bboxes = features['bboxes']
    ids = features['ids']

    distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)

    distorted_inputs = tf.subtract(distorted_inputs, 0.5)
    distorted_inputs = tf.multiply(distorted_inputs, 2.0)

    names = ('inputs', 'ids')
    tensors = [distorted_inputs, ids]
    return [names, tensors]
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | project source | file source
def __init__(self, actions):
        self.replayMemory = deque()
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        self.actions = actions
        self.files = 0
        self.currentQNet = QNet(len(actions))
        self.targetQNet = QNet(len(actions))

        self.actionInput = tf.placeholder("float", [None, len(actions)],name="actions_one_hot")
        self.yInput = tf.placeholder("float", [None],name="y")

        self.action_mask = tf.multiply(self.currentQNet.QValue, self.actionInput)
        self.Q_action = tf.reduce_sum(self.action_mask, reduction_indices=1)

        self.delta = delta = tf.subtract(self.Q_action, self.yInput)

        self.loss = tf.where(tf.abs(delta) < 1.0, 0.5 * tf.square(delta), tf.abs(delta) - 0.5)
        #self.loss = tf.square(tf.subtract( self.Q_action, self.yInput ))

        self.cost = tf.reduce_mean(self.loss)
        self.trainStep = tf.train.RMSPropOptimizer(learning_rate=RMS_LEARNING_RATE,momentum=RMS_MOMENTUM,epsilon= RMS_EPSILON,decay=RMS_DECAY).minimize(
            self.cost)
        #
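
The tf.where line above is the Huber loss on the TD error delta: quadratic for |delta| < 1 and linear beyond, so large TD errors do not dominate the gradient. A standalone sketch (TensorFlow 1.x assumed):

import tensorflow as tf

delta = tf.constant([-3.0, -0.5, 0.0, 0.5, 3.0])
huber = tf.where(tf.abs(delta) < 1.0, 0.5 * tf.square(delta), tf.abs(delta) - 0.5)

with tf.Session() as sess:
    print(sess.run(huber))  # [2.5   0.125 0.    0.125 2.5  ]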
Project: tensorflow_face    Author: ZhihengCV    | project source | file source
def image_preprocessing(image, train):
    """Decode and preprocess one image for evaluation or training.

    Args:
      image: JPEG
      train: boolean
    Returns:
      3-D float Tensor containing an appropriately scaled image

    Raises:
       ValueError: if user does not provide bounding box
    """
    with tf.name_scope('image_preprocessing'):
        if train:
            image = tf.image.random_flip_left_right(image)
            image = tf.image.random_brightness(image, 0.6)
            if FLAGS.image_channel >= 3:
                image = tf.image.random_saturation(image, 0.6, 1.4)
        # Finally, rescale to [-1,1] instead of [0, 1)
        image = tf.subtract(image, 0.5)
        image = tf.multiply(image, 2.0)
        image = tf.image.per_image_standardization(image)
        return image
Project: relaax    Author: deeplearninc    | project source | file source
def build_graph(self, actor, critic, cfg):
        self.ph_action = graph.Placeholder(np.float32, shape=(None, actor.action_size), name="ph_action")
        self.ph_advantage = graph.Placeholder(np.float32, shape=(None,), name="ph_adv")
        self.ph_discounted_reward = graph.Placeholder(np.float32, shape=(None,), name="ph_edr")

        mu, sigma2 = actor.node
        sigma2 += tf.constant(1e-8)

        # policy entropy
        self.entropy = -tf.reduce_mean(0.5 * (tf.log(2. * np.pi * sigma2) + 1.))

        # policy loss (calculation)
        b_size = tf.to_float(tf.size(self.ph_action.node) / actor.action_size)
        log_pi = tf.log(sigma2)
        x_prec = tf.exp(-log_pi)
        x_diff = tf.subtract(self.ph_action.node, mu)
        x_power = tf.square(x_diff) * x_prec * -0.5
        gaussian_nll = (tf.reduce_sum(log_pi, axis=1)
                        + b_size * tf.log(2. * np.pi)) / 2. - tf.reduce_sum(x_power, axis=1)

        self.policy_loss = -(tf.reduce_mean(gaussian_nll * self.ph_advantage.node) + cfg.entropy_beta * self.entropy)

        # value loss
        # (Learning rate for the Critic is sized by critic_scale parameter)
        self.value_loss = cfg.critic_scale * tf.reduce_mean(tf.square(self.ph_discounted_reward.node - critic.node))
Project: tensorflow-forward-ad    Author: renmengye    | project source | file source
def SparseSoftmaxCrossEntropyWithLogits_FwGrad(op,
                                               dx,
                                               dy,
                                               _op_table=None,
                                               _grad_table=None):
  """Forward gradient operator of sparse softmax cross entropy."""
  grad = op.outputs[1]  # This is already computed in the forward pass.
  x = op.inputs[0]
  if dx is None:
    return None
  y = tf.nn.softmax(x)
  grad_grad = tf.subtract(
      tf.multiply(y, dx),
      tf.multiply(
          y, tf.reduce_sum(
              tf.multiply(dx, y), [1], keep_dims=True)))
  return tf.reduce_sum(tf.multiply(grad, dx), [1]), grad_grad
Project: terngrad    Author: wenwei202    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
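
A hypothetical usage sketch for the preprocess_image above (TensorFlow 1.x assumed): a uint8 image is centered at 128 and scaled into [-1, 1).

import tensorflow as tf

raw = tf.zeros([300, 400, 3], dtype=tf.uint8)
out = preprocess_image(raw, output_height=224, output_width=224, is_training=False)

with tf.Session() as sess:
    img = sess.run(out)
    print(img.shape, img.min(), img.max())  # (224, 224, 3) -1.0 -1.0 for an all-zero input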
Project: terngrad    Author: wenwei202    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image.set_shape([output_height, output_width, 1])
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)
  return image
Project: terngrad    Author: wenwei202    | project source | file source
def ternary_decoder(encoded_data, scaler, shape):
  """Decoding the signs to float format """
  a = tf.cast(encoded_data, tf.int32)
  a_split1 = tf.mod(a,4)
  a_split2 = tf.to_int32(tf.mod(a/4,4))
  a_split3 = tf.to_int32(tf.mod(a/16,4))
  a_split4 = tf.to_int32(tf.mod(a/64,4))
  a = tf.concat([a_split1, a_split2, a_split3, a_split4], 0)
  real_size = tf.reduce_prod(shape)
  a = tf.to_float(a)
  a = tf.gather(a, tf.range(0,real_size))

  a = tf.reshape(a, shape)
  a = tf.subtract(a,1)
  decoded = a*scaler
  return decoded
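
A hypothetical usage sketch (TensorFlow 1.x assumed): each byte packs four 2-bit codes in {0, 1, 2}, which decode to {-1, 0, 1} times the scaler.

import tensorflow as tf

encoded = tf.constant([0 + 1 * 4 + 2 * 16 + 1 * 64], dtype=tf.uint8)  # packed codes (0, 1, 2, 1)
decoded = ternary_decoder(encoded, scaler=tf.constant(0.5), shape=[4])

with tf.Session() as sess:
    print(sess.run(decoded))  # [-0.5  0.   0.5  0. ]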
Project: hourglasstensorlfow    Author: wbenbihi    | project source | file source
def _accur(self, pred, gtMap, num_image):
        """ Given a Prediction batch (pred) and a Ground Truth batch (gtMaps),
        returns one minus the mean distance.
        Args:
            pred        : Prediction Batch (shape = num_image x 64 x 64)
            gtMaps      : Ground Truth Batch (shape = num_image x 64 x 64)
            num_image   : (int) Number of images in batch
        Returns:
            (float)
        """
        err = tf.to_float(0)
        for i in range(num_image):
            err = tf.add(err, self._compute_err(pred[i], gtMap[i]))
        return tf.subtract(tf.to_float(1), err/num_image)

    # MULTI CONTEXT ATTENTION MECHANISM
    # WORK IN PROGRESS DO NOT USE THESE METHODS
    # BASED ON:
    # Multi-Context Attention for Human Pose Estimation
    # Authors: Xiao Chu, Wei Yang, Wanli Ouyang, Cheng Ma, Alan L. Yuille, Xiaogang Wang
    # Paper: https://arxiv.org/abs/1702.07432
    # GitHub Torch7 Code: https://github.com/bearpaw/pose-attention
Project: MachineLearningTutorial    Author: SpikeKing    | project source | file source
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(),
                 scale=0.1):
        self.n_input = n_input  # number of input units
        self.n_hidden = n_hidden  # number of hidden units (single hidden layer)
        self.transfer = transfer_function  # activation function
        self.scale = tf.placeholder(tf.float32)  # noise scale, fed with training_scale at training time
        self.training_scale = scale  # noise level used during training
        network_weights = self._initialize_weights()  # initialize weights: w1/b1 (hidden layer), w2/b2 (output layer)
        self.weights = network_weights  # network weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])  # input placeholder, fed at run time
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                                                     self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost: 0.5 * sum of the squared reconstruction error, 0.5*(x - x_)^2
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)  # initialize variables
Project: MachineLearningTutorial    Author: SpikeKing    | project source | file source
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(),
                 dropout_probability=0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
Project: MachineLearningTutorial    Author: SpikeKing    | project source | file source
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
Project: faceNet_RealTime    Author: jack55436001    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: positive-to-negative triplet distance margin.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: tf_face    Author: ZhijianChan    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: positive-to-negative triplet distance margin.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    tot_dist = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    loss = tf.reduce_mean(tf.maximum(tot_dist, 0.0), 0)
    return loss
Project: CozmoSelfDriveToyUsingCNN    Author: benjafire    | project source | file source
def __init__(self):
        #self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
        self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
        self.y_ = tf.placeholder(tf.float32, [None, 2])
        (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
        (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
        (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
        (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
        self.h_conv5_flat = flattened(self.h_conv5)
        (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
        (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
        (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
        (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)
        W_fc5 = weight_variable([10, 2])
        b_fc5 = bias_variable([2])
        self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5
        self.loss = tf.reduce_mean(tf.abs(tf.subtract(self.y_, self.y_out)))
Project: siamese_tf_mnist    Author: ywpkwon    | project source | file source
def loss_with_spring(self):
        margin = 5.0
        labels_t = self.y_
        labels_f = tf.subtract(1.0, self.y_, name="1-yi")          # labels_ = !labels;
        eucd2 = tf.pow(tf.subtract(self.o1, self.o2), 2)
        eucd2 = tf.reduce_sum(eucd2, 1)
        eucd = tf.sqrt(eucd2+1e-6, name="eucd")
        C = tf.constant(margin, name="C")
        # yi*||CNN(p1i)-CNN(p2i)||^2 + (1-yi)*max(0, C-||CNN(p1i)-CNN(p2i)||^2)
        pos = tf.multiply(labels_t, eucd2, name="yi_x_eucd2")
        # neg = tf.multiply(labels_f, tf.subtract(0.0,eucd2), name="yi_x_eucd2")
        # neg = tf.multiply(labels_f, tf.maximum(0.0, tf.subtract(C,eucd2)), name="Nyi_x_C-eucd_xx_2")
        neg = tf.multiply(labels_f, tf.pow(tf.maximum(tf.subtract(C, eucd), 0), 2), name="Nyi_x_C-eucd_xx_2")
        losses = tf.add(pos, neg, name="losses")
        loss = tf.reduce_mean(losses, name="loss")
        return loss
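
A standalone sketch (TensorFlow 1.x assumed) of the same contrastive loss on toy distances: yi * d^2 for similar pairs plus (1 - yi) * max(0, C - d)^2 for dissimilar ones, with margin C = 5.

import tensorflow as tf

y = tf.constant([1.0, 0.0])   # 1 = similar pair, 0 = dissimilar pair
d = tf.constant([2.0, 3.0])   # Euclidean distances between the paired embeddings

losses = y * tf.square(d) + (1.0 - y) * tf.square(tf.maximum(5.0 - d, 0.0))

with tf.Session() as sess:
    print(sess.run(losses))   # [4. 4.]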
Project: tf-slim-mnist    Author: mnuke    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: Faster-RCNN_TF    Author: smallcorgi    | project source | file source
def _modified_smooth_l1(self, sigma, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights):
        """
            ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
            SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                          |x| - 0.5 / sigma^2,    otherwise
        """
        sigma2 = sigma * sigma

        inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))

        smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
        smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
        smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
        smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
                                  tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))

        outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)

        return outside_mul
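
A standalone sketch (TensorFlow 1.x assumed) of the same smooth-L1 rule on scalar inputs, showing the quadratic-to-linear switch at |x| = 1 / sigma^2 (here sigma = 1):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
sigma2 = 1.0
inside = tf.cast(tf.less(tf.abs(x), 1.0 / sigma2), tf.float32)
smooth_l1 = inside * 0.5 * sigma2 * tf.square(x) + (1.0 - inside) * (tf.abs(x) - 0.5 / sigma2)

with tf.Session() as sess:
    print(sess.run(smooth_l1))  # [1.5   0.125 0.    0.125 1.5  ]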
Project: icyface_api    Author: bupticybee    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: positive-to-negative triplet distance margin.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: tensorflow_yolo2    Author: wenxichen    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: Classification_Nets    Author: BobLiu20    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: tefla    Author: openAGI    | project source | file source
def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.
      positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.
      negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.
      alpha: positive to negative triplet distance margin

    Returns:
      the triplet loss.
    """
    with tf.name_scope(name):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
    return loss
Project: tefla    Author: openAGI    | project source | file source
def vggnet_input(im_tf):
    im_tf = tf.image.convert_image_dtype(im_tf, dtype=tf.float32)
    # im_tf = tf.image.central_crop(im_tf, central_fraction=0.875)
    # im_tf = tf.expand_dims(im_tf, 0)
    # im_tf = tf.image.resize_bilinear(im_tf, [224, 224], align_corners=False)
    # im_tf = tf.squeeze(im_tf, [0])
    im_tf = tf.subtract(im_tf, 0.5)
    im_tf = tf.multiply(im_tf, 2.0)
    im_tf = im_tf * 255.0
    r_, g_, b_ = tf.split(im_tf, 3, axis=2)
    r_ = r_ - VGG_MEAN[2]
    g_ = g_ - VGG_MEAN[1]
    b_ = b_ - VGG_MEAN[0]
    im_tf = tf.concat([r_, g_, b_], axis=2)
    # im_tf = tf.expand_dims(im_tf, 0)
    return im_tf
Project: shuttleNet    Author: shiyemin    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_step(self, inputs):
        Z, Y, X, theta, lmbd = self.inputs
        K, p = self.D.shape
        L = self.L
        with tf.name_scope("ISTA_iteration"):
            self.S = tf.constant(np.eye(K, dtype=np.float32) - self.S0/L,
                                 shape=[K, K], name='S')
            self.We = tf.constant(self.D.T/L, shape=[p, K],
                                  dtype=tf.float32, name='We')
            hk = tf.matmul(Y, self.S) + tf.matmul(X, self.We)
            self.step_FISTA = Zk = soft_thresholding(hk, lmbd/L)
            # self.theta_k = tk = (tf.sqrt(theta*theta+4) - theta)*theta/2
            self.theta_k = tk = (1 + tf.sqrt(1 + 4*theta*theta))/2
            dZ = tf.subtract(Zk, Z)
            # self.Yk = Zk + tk*(1/theta-1)*dZ
            self.Yk = Zk + (theta-1)/tk*dZ
            self.dz = tf.reduce_mean(tf.reduce_sum(
                dZ*dZ, reduction_indices=[1]))

            step = tf.tuple([Zk, tk, self.Yk])
        return step, self.dz
Project: tensorflow-rl    Author: steveKapturowski    | project source | file source
def _build_q_head(self, input_state):
        self.w_value, self.b_value, self.value = layers.fc('fc_value', input_state, 1, activation='linear')
        self.w_adv, self.b_adv, self.advantage = layers.fc('fc_advantage', input_state, self.num_actions, activation='linear')

        self.output_layer = (
            self.value + self.advantage
            - tf.reduce_mean(
                self.advantage,
                axis=1,
                keep_dims=True
            )
        )

        q_selected_action = tf.reduce_sum(self.output_layer * self.selected_action_ph, axis=1)
        diff = tf.subtract(self.target_ph, q_selected_action)
        return self._value_function_loss(diff)
Project: fast-neural-style    Author: coder-james    | project source | file source
def get_masks(origin_images, height, width, channels=3):
    """add horizon color lines and set empty"""
    quarty = tf.random_uniform([height // 4, 1])
    prop = tf.scalar_mul(tf.convert_to_tensor(0.2), tf.ones([height // 4, 1]))
    quarty = tf.round(tf.add(quarty, prop))
    y = tf.reshape(tf.stack([quarty, quarty, quarty, quarty], axis=1), [height, 1])
    mask = tf.matmul(y, tf.ones([1, width]))
    masks = tf.expand_dims(mask, 0)
    masks = tf.expand_dims(masks, -1)
    maskedimages = tf.multiply(origin_images, masks)
    """add noise"""
    scale = tf.random_uniform([channels, height, 1])
    y = tf.subtract(tf.ones([height, 1]), y)
    y = tf.expand_dims(y, 0)
    y = tf.scalar_mul(tf.convert_to_tensor(255.), tf.multiply(scale, y))
    noise = tf.add(mask, tf.matmul(y, tf.ones([channels, 1, width])))
    noise = tf.stack(tf.split(value=noise, num_or_size_splits=int(noise.get_shape()[0]), axis=0), axis=3)
    maskedimages = tf.add(maskedimages, noise)
    return maskedimages
Project: single-image-depth-estimation    Author: liuhyCV    | project source | file source
def loss(logits, depths, invalid_depths):
    #304*228    55*74
    #576*172    27*142
    out_put_size = 55*74
    logits_flat = tf.reshape(logits, [-1, out_put_size])
    depths_flat = tf.reshape(depths, [-1, out_put_size])
    invalid_depths_flat = tf.reshape(invalid_depths, [-1, out_put_size])

    predict = tf.multiply(logits_flat, invalid_depths_flat)
    target = tf.multiply(depths_flat, invalid_depths_flat)

    d = tf.subtract(predict, target)
    square_d = tf.square(d)
    sum_square_d = tf.reduce_sum(square_d, 1)
    sum_d = tf.reduce_sum(d, 1)
    sqare_sum_d = tf.square(sum_d)

    cost = tf.reduce_mean(sum_square_d / out_put_size - 0.5*sqare_sum_d / math.pow(out_put_size, 2))

    tf.add_to_collection('losses', cost)



    #return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: MobileNet    Author: Zehaos    | project source | file source
def iou(bbox_1, bbox_2):
  """Compute iou of a box with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bbox_1: 1-D with shape `[4]`.
    bbox_2: 1-D with shape `[4]`.

  Returns:
    IOU
  """
  lr = tf.minimum(bbox_1[3], bbox_2[3]) - tf.maximum(bbox_1[1], bbox_2[1])
  tb = tf.minimum(bbox_1[2], bbox_2[2]) - tf.maximum(bbox_1[0], bbox_2[0])
  lr = tf.maximum(lr, lr * 0)
  tb = tf.maximum(tb, tb * 0)
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bbox_1[3] - bbox_1[1]), (bbox_1[2] - bbox_1[0])) +
    tf.multiply((bbox_2[3] - bbox_2[1]), (bbox_2[2] - bbox_2[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
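
A hypothetical usage sketch (TensorFlow 1.x assumed); both boxes are given as [y_min, x_min, y_max, x_max]:

import tensorflow as tf

box_a = tf.constant([0.0, 0.0, 2.0, 2.0])
box_b = tf.constant([1.0, 1.0, 3.0, 3.0])

with tf.Session() as sess:
    print(sess.run(iou(box_a, box_b)))  # 1/7 ~= 0.143 (intersection 1, union 4 + 4 - 1)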
Project: MobileNet    Author: Zehaos    | project source | file source
def iou(bbox_1, bbox_2):
  """Compute iou of a box with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bbox_1: 1-D with shape `[4]`.
    bbox_2: 1-D with shape `[4]`.

  Returns:
    IOU
  """
  lr = tf.minimum(bbox_1[3], bbox_2[3]) - tf.maximum(bbox_1[1], bbox_2[1])
  tb = tf.minimum(bbox_1[2], bbox_2[2]) - tf.maximum(bbox_1[0], bbox_2[0])
  lr = tf.maximum(lr, lr * 0)
  tb = tf.maximum(tb, tb * 0)
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bbox_1[3] - bbox_1[1]), (bbox_1[2] - bbox_1[0])) +
    tf.multiply((bbox_2[3] - bbox_2[1]), (bbox_2[2] - bbox_2[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
Project: MobileNet    Author: Zehaos    | project source | file source
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
Project: quantum-optimal-control    Author: SchusterLab    | project source | file source
def get_inner_product(self,psi1,psi2):
        #Take 2 states psi1,psi2, calculate their overlap, for single vector
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num])
        psi_1_imag = (psi1[state_num:2*state_num])
        psi_2_real = (psi2[0:state_num])
        psi_2_imag = (psi2[state_num:2*state_num])
        # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude
        with tf.name_scope('inner_product'):
            ac = tf.multiply(psi_1_real,psi_2_real)
            bd = tf.multiply(psi_1_imag,psi_2_imag)
            bc = tf.multiply(psi_1_imag,psi_2_real)
            ad = tf.multiply(psi_1_real,psi_2_imag)
            reals = tf.square(tf.add(tf.reduce_sum(ac),tf.reduce_sum(bd)))
            imags = tf.square(tf.subtract(tf.reduce_sum(bc),tf.reduce_sum(ad)))
            norm = tf.add(reals,imags)
        return norm
Project: quantum-optimal-control    Author: SchusterLab    | project source | file source
def get_inner_product_2D(self,psi1,psi2):
        #Take 2 states psi1,psi2, calculate their overlap, for arbitrary number of vectors
        # psi1 and psi2 are shaped as (2*state_num, number of vectors)
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num,:])
        psi_1_imag = (psi1[state_num:2*state_num,:])
        psi_2_real = (psi2[0:state_num,:])
        psi_2_imag = (psi2[state_num:2*state_num,:])
        # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude
        with tf.name_scope('inner_product'):
            ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0)
            bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0)
            bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0)
            ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0)
            reals = tf.square(tf.reduce_sum(tf.add(ac,bd))) # first trace inner product of all vectors, then squared
            imags = tf.square(tf.reduce_sum(tf.subtract(bc,ad)))
            norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2)
        return norm
Project: quantum-optimal-control    Author: SchusterLab    | project source | file source
def get_inner_product_3D(self,psi1,psi2):
        #Take 2 states psi1,psi2, calculate their overlap, for arbitrary number of vectors and timesteps
        # psi1 and psi2 are shaped as (2*state_num, time_steps, number of vectors)
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num,:])
        psi_1_imag = (psi1[state_num:2*state_num,:])
        psi_2_real = (psi2[0:state_num,:])
        psi_2_imag = (psi2[state_num:2*state_num,:])
        # psi1 has a+ib, psi2 has c+id, we wanna get Sum ((ac+bd) + i (bc-ad)) magnitude
        with tf.name_scope('inner_product'):
            ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0)
            bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0)
            bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0)
            ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0)
            reals = tf.reduce_sum(tf.square(tf.reduce_sum(tf.add(ac,bd),1)))
            # first trace inner product of all vectors, then squared, then sum contribution of all time steps
            imags = tf.reduce_sum(tf.square(tf.reduce_sum(tf.subtract(bc,ad),1)))
            norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2)
        return norm
Project: OSVOS-TensorFlow    Author: scaelles    | project source | file source
def preprocess_img(image):
    """Preprocess the image to adapt it to network requirements
    Args:
    Image we want to input the network (W,H,3) numpy array
    Returns:
    Image ready to input the network (1,W,H,3)
    """
    if type(image) is not np.ndarray:
        image = np.array(Image.open(image), dtype=np.uint8)
    in_ = image[:, :, ::-1]
    in_ = np.subtract(in_, np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    # in_ = tf.subtract(tf.cast(in_, tf.float32), np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    in_ = np.expand_dims(in_, axis=0)
    # in_ = tf.expand_dims(in_, 0)
    return in_


# TO DO: Move preprocessing into Tensorflow
Project: facenet    Author: davidsandberg    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: positive-to-negative triplet distance margin.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Project: DeepCellSeg    Author: arbellea    | project source | file source
def my_clustering_loss(net_out,feature_map):

    net_out_vec = tf.reshape(net_out,[-1,1])
    pix_num = net_out_vec.get_shape().as_list()[0]
    feature_vec = tf.reshape(feature_map,[pix_num,-1])
    net_out_vec = tf.div(net_out_vec, tf.reduce_sum(net_out_vec,keep_dims=True))
    not_net_out_vec = tf.subtract(tf.constant(1.),net_out_vec)
    mean_fg_var = tf.get_variable('mean_fg', shape=[feature_vec.get_shape().as_list()[1], 1], trainable=False)
    mean_bg_var = tf.get_variable('mean_bg', shape=[feature_vec.get_shape().as_list()[1], 1], trainable=False)

    mean_bg = tf.matmul(not_net_out_vec,feature_vec,True)
    mean_fg = tf.matmul(net_out_vec,feature_vec,True)

    feature_square = tf.square(feature_vec)

    loss = tf.add(tf.matmul(net_out_vec, tf.reduce_sum(tf.square(tf.subtract(feature_vec, mean_fg_var)), 1, True), True),
                  tf.matmul(not_net_out_vec, tf.reduce_sum(tf.square(tf.subtract(feature_vec,mean_bg_var)), 1, True), True))
    with tf.control_dependencies([loss]):
        update_mean = tf.group(tf.assign(mean_fg_var,mean_fg),tf.assign(mean_bg_var,mean_bg))
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean)

    return loss
Project: thesis_scripts    Author: PhilippKopp    | project source | file source
def subtract_mean_multi(image_tensors, mean_image_path, channels=NUM_CHANNELS, image_size=512):

    mean_image = tf.convert_to_tensor(mean_image_path, dtype=tf.string)
    mean_file_contents = tf.read_file(mean_image)
    mean_uint8 = tf.image.decode_png(mean_file_contents, channels=channels)
    mean_uint8.set_shape([image_size, image_size, channels])


    images_mean_free = []
    for image_tensor in image_tensors:
        image_tensor.set_shape([image_size, image_size, channels])
        image = tf.cast(image_tensor, tf.float32)

        #subtract mean image
        image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))
        images_mean_free.append(image_mean_free)

    return images_mean_free
Project: thesis_scripts    Author: PhilippKopp    | project source | file source
def single_input_image(image_str, mean_image_path, png_with_alpha=False, image_size=512):

    mean_image_str = tf.convert_to_tensor(mean_image_path, dtype=tf.string)

    file_contents = tf.read_file(image_str)
    if png_with_alpha:
        uint8image = tf.image.decode_png(file_contents, channels=4)
        uint8image.set_shape([image_size, image_size, 4])
    else:
        uint8image = tf.image.decode_image(file_contents, channels=NUM_CHANNELS)
        uint8image.set_shape([image_size, image_size, NUM_CHANNELS])
    image = tf.cast(uint8image, tf.float32)

    #subtract mean image
    mean_file_contents = tf.read_file(mean_image_str)
    if png_with_alpha:
        mean_uint8 = tf.image.decode_png(mean_file_contents, channels=4)
        mean_uint8.set_shape([image_size, image_size, 4])
    else:
        mean_uint8 = tf.image.decode_image(mean_file_contents, channels=NUM_CHANNELS)
        mean_uint8.set_shape([image_size, image_size, NUM_CHANNELS])
    image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))

    return image_mean_free
Project: X-ray-classification    Author: bendidi    | project source | file source
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.
  If height and width are specified, the image is resized to that size with
  bilinear interpolation.
  If central_fraction is specified, the central fraction of the input image is
  cropped.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it is converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
      the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
      image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
      # Resize the image to the specified height and width.
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width],
                                       align_corners=False)
      image = tf.squeeze(image, [0])
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
Project: A3C    Author: go2sea    | project source | file source
def TD_loss(self):
        return tf.subtract(self.v_input, self.v)
Project: facerecognition    Author: guoxiaolu    | project source | file source
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
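
A hypothetical usage sketch for the prewhiten above: it normalizes an image to zero mean and roughly unit standard deviation, as FaceNet-style pipelines expect.

import numpy as np

img = np.random.randint(0, 256, size=(160, 160, 3)).astype(np.float32)
white = prewhiten(img)
print(white.mean(), white.std())  # ~0.0, ~1.0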
Project: facerecognition    Author: guoxiaolu    | project source | file source
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    tprs = np.zeros((nrof_folds,nrof_thresholds))
    fprs = np.zeros((nrof_folds,nrof_thresholds))
    accuracy = np.zeros((nrof_folds))

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])

    tpr = np.mean(tprs,0)
    fpr = np.mean(fprs,0)
    return tpr, fpr, accuracy
Project: facerecognition    Author: guoxiaolu    | project source | file source
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0

        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])

    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
Project: youtube-8m    Author: wangheda    | project source | file source
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
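
A standalone sketch (TensorFlow 1.x assumed) of the hinge computation above on toy values: the {0, 1} labels become {-1, +1} signs, then max(0, b - sign * prediction) with b = 1.

import tensorflow as tf

labels = tf.constant([[1.0, 0.0]])
preds = tf.constant([[0.3, -0.2]])
signs = 2.0 * labels - 1.0                    # [[ 1. -1.]]
hinge = tf.maximum(0.0, 1.0 - signs * preds)

with tf.Session() as sess:
    print(sess.run(hinge))  # [[0.7 0.8]]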
Project: youtube-8m    Author: wangheda    | project source | file source
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
Project: TensorFlow-World    Author: astorfi    | project source | file source
def loss_fn(W,b,x_data,y_target):
    logits = tf.subtract(tf.matmul(x_data, W),b)
    norm_term = tf.divide(tf.reduce_sum(tf.square(W)), 2)  # ||W||^2 / 2; multiply(transpose(W), W) would broadcast to an outer product
    classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param,classification_loss), tf.multiply(FLAGS.Reg_param,norm_term))
    return total_loss
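
A standalone sketch (TensorFlow 1.x assumed) of the same soft-margin SVM objective with concrete constants in place of the FLAGS above (delta = 1, C_param = 1, Reg_param = 1):

import tensorflow as tf

x_data = tf.constant([[1.0, 2.0], [-1.0, -2.0]])
y_target = tf.constant([[1.0], [-1.0]])
W = tf.constant([[0.5], [0.5]])
b = tf.constant([0.1])

logits = tf.matmul(x_data, W) - b
classification_loss = tf.reduce_mean(tf.maximum(0.0, 1.0 - logits * y_target))
norm_term = tf.reduce_sum(tf.square(W)) / 2.0

with tf.Session() as sess:
    print(sess.run(classification_loss + norm_term))  # 0.25: both points sit outside the margin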