Python tensorflow module: squared_difference() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.squared_difference().
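Before the project examples, a minimal sketch of what the op computes (the tensors and values here are illustrative, not from any of the projects below): tf.squared_difference(x, y) returns (x - y)**2 element-wise, with standard broadcasting, and is equivalent to tf.square(x - y).

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([3.0, 2.0, 1.0])
sq = tf.squared_difference(x, y)  # element-wise (x - y)**2

with tf.Session() as sess:
    print(sess.run(sq))  # [4. 0. 4.]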

Project: gait-recognition    Author: marian-margeta    | project source | file source
def euclidean_distance(self):
        x = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 1), 1)
        y = tf.argmax(tf.reduce_max(self.smoothed_sigm_network, 2), 1)

        x = tf.cast(x, tf.float32)
        y = tf.cast(y, tf.float32)

        dy = tf.squeeze(self.desired_points[:, 0, :])
        dx = tf.squeeze(self.desired_points[:, 1, :])

        sx = tf.squared_difference(x, dx)
        sy = tf.squared_difference(y, dy)

        l2_dist = tf.sqrt(sx + sy)

        return l2_dist
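The method above collapses each heat map to its arg-max (x, y) coordinate and returns the per-point Euclidean distance to the desired points, i.e. sqrt((x - dx)**2 + (y - dy)**2). A scalar illustration with assumed values:

import numpy as np

x, y, dx, dy = 3.0, 4.0, 0.0, 0.0
l2_dist = np.sqrt((x - dx) ** 2 + (y - dy) ** 2)  # 5.0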
Project: DeepFM    Author: dwt0317    | project source | file source
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    preds = tf.add(tf.reduce_sum( tf.multiply(user_feature , item_feature) , 1), mu)

    square_error = tf.sqrt(tf.reduce_mean( tf.squared_difference(preds, ratings)))  # root-mean-squared error (RMSE), despite the name
    loss = square_error + lamb*(tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)  # alternative: tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)

    return train_step, square_error, loss, merged_summary
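A hedged usage sketch for the builder above; the placeholder names, shapes, and hyperparameter values are assumptions for illustration, not part of the original project:

import tensorflow as tf

user_indices = tf.placeholder(tf.int32, [None])
item_indices = tf.placeholder(tf.int32, [None])
ratings = tf.placeholder(tf.float32, [None])

train_step, square_error, loss, merged_summary = build_model(
    user_indices, item_indices, rank=8, ratings=ratings,
    user_cnt=1000, item_cnt=500, lr=0.01, lamb=0.1,
    mu=3.5, init_value=0.1)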
Project: unreal-implementation    Author: 404akhan    | project source | file source
def _build_vr_network(self):
        self.vr_states = tf.placeholder(shape=[None, 80, 80, 4], dtype=tf.float32)
        self.vr_value_targets = tf.placeholder(shape=[None], dtype=tf.float32)

        with tf.variable_scope("shared", reuse=True):
            conv2 = self.build_shared_network(self.vr_states)

        fc1 = tf.contrib.layers.fully_connected(
            inputs=tf.contrib.layers.flatten(conv2),
            num_outputs=256,
            scope="fc1",
            reuse=True)

        self.vr_value = tf.contrib.layers.fully_connected(
            inputs=fc1,
            num_outputs=1,
            activation_fn=None,
            scope='logits_value',
            reuse=True)

        self.vr_value = tf.squeeze(self.vr_value, squeeze_dims=[1])

        self.vr_losses = tf.squared_difference(self.vr_value, self.vr_value_targets)
        self.vr_loss = tf.reduce_sum(self.vr_losses)
        self.vr_loss = self.pc_vr_lambda * self.vr_loss
Project: Safe-RL-Benchmark    Author: befelix    | project source | file source
def __init__(self, policy, rate, train=True):
        self.rate = rate

        with tf.variable_scope('value_estimator'):
            self.X = tf.placeholder(policy.dtype,
                                    shape=policy.X.shape,
                                    name='X')
            self.V = tf.placeholder(policy.dtype,
                                    shape=[None],
                                    name='V')

            self.W = policy.init_weights((policy.layers[0], 1))

            self.V_est = tf.matmul(self.X, self.W)

            self.losses = tf.squared_difference(self.V_est, self.V)
            self.loss = tf.reduce_sum(self.losses, name='loss')

            if train:
                self.opt = tf.train.RMSPropOptimizer(rate, 0.99, 0.0, 1e-6)
                self.grads_and_vars = self.opt.compute_gradients(self.loss)
                self.grads_and_vars = [(g, v) for g, v in self.grads_and_vars
                                       if g is not None]
                self.update = self.opt.apply_gradients(self.grads_and_vars)
Project: OpenLearning4DeepRecsys    Author: Leavingseason    | project source | file source
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    preds = tf.add(tf.reduce_sum( tf.multiply(user_feature , item_feature) , 1), mu)

    square_error = tf.sqrt(tf.reduce_mean( tf.squared_difference(preds, ratings)))  # root-mean-squared error (RMSE), despite the name
    loss = square_error + lamb*(tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)  # alternative: tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)

    return train_step, square_error, loss, merged_summary
Project: CIKM2017    Author: heliarmk    | project source | file source
def combind_loss(logits, labels, reg_preds, reg_labels):
    alpha = 1
    beta = 0.025
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cem = tf.reduce_mean(cross_entropy, name='cross_entropy')
    w_cem = cem * alpha
    tf.add_to_collection("losses", w_cem)
    reg_labels = tf.reshape(reg_labels, (-1, 1))
    # rmse = tf.sqrt(tf.losses.mean_squared_error(reg_labels, reg_preds, loss_collection=None))
    rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(reg_labels, reg_preds)))
    w_rmse = rmse * beta
    tf.add_to_collection("losses", w_rmse)

    return tf.add_n(tf.get_collection("losses"), name='combinded_loss'), cem, rmse
Project: lsdc    Author: febert    | project source | file source
def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()

    self.ops = [
        ('igamma', None, tf.igamma, core.igamma),
        ('igammac', None, tf.igammac, core.igammac),
        ('zeta', None, tf.zeta, core.zeta),
        ('polygamma', None, tf.polygamma, core.polygamma),
        ('maximum', None, tf.maximum, core.maximum),
        ('minimum', None, tf.minimum, core.minimum),
        ('squared_difference', None, tf.squared_difference,
         core.squared_difference),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_distance(self, c, s):
  """Creates a soft-min distance of the centers to the points"""
  c_shape = c.get_shape().as_list();        
  s_shape = s.get_shape().as_list();

  #expand matrices
  cc = tf.reshape(c, [c_shape[0], c_shape[1], 1]);    
  ss = tf.reshape(s, [s_shape[0], s_shape[1], 1]);
  ss = tf.transpose(ss, perm = [0,2,1]);
  cc = tf.tile(cc, [1, 1, s_shape[0]]);
  ss = tf.tile(ss, [c_shape[0], 1, 1]);

  #pairwise distances
  dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 1));
  dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

  #softmin
  return tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 0);  # tf.mul was renamed tf.multiply in TF 1.0
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_distance(c, s, k = 2.0):
  """Creates a soft-min distance of the centers to the points"""
  c_shape = c.get_shape().as_list();        
  s_shape = s.get_shape().as_list();

  #expand matrices
  cc = tf.reshape(c, [c_shape[0], c_shape[1], 1]);    
  ss = tf.reshape(s, [s_shape[0], s_shape[1], 1]);
  ss = tf.transpose(ss, perm = [2,1,0]);
  cc = tf.tile(cc, [1, 1, s_shape[0]]);
  ss = tf.tile(ss, [c_shape[0], 1, 1]);
  #cc = tf.transpose(cc, perm = [2,1,0]);
  #cc = tf.tile(cc, [s_shape[0], 1, 1]);
  #ss = tf.tile(ss, [1, 1, c_shape[0]]); 

  #pairwise distances
  dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 1));

  #softmin
  softmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-k,"float32"), dist2)), dist2), reduction_indices = 1);  # tf.mul was renamed tf.multiply in TF 1.0

  return tf.reduce_mean(softmin);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_pair_wise_distances(x, y):
  x_shape = x.get_shape().as_list();        
  y_shape = y.get_shape().as_list();

  #expand matrices
  xx = tf.reshape(x, [x_shape[0], x_shape[1], 1]);    
  yy = tf.reshape(y, [y_shape[0], y_shape[1], 1]);
  yy = tf.transpose(yy, perm = [2,1,0]);
  xx = tf.tile(xx, [1, 1, y_shape[0]]);
  yy = tf.tile(yy, [x_shape[0], 1, 1]);
  #cc = tf.transpose(cc, perm = [2,1,0]);
  #cc = tf.tile(cc, [s_shape[0], 1, 1]);
  #ss = tf.tile(ss, [1, 1, c_shape[0]]); 

  #pairwise distances
  dist = tf.sqrt(tf.reduce_sum(tf.squared_difference(xx,yy), reduction_indices = 1));
  return dist;
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_aligned_distance(x,y,nx,ny, k = 2.0, gamma = 1.0):
  d = create_pair_wise_distances(x, y);
  a = create_pair_wise_dots(nx, ny);
  a = tf.scalar_mul(-0.5, tf.add(a, -1.0)); # [0,1] 0 = aligned
  return tf.reduce_mean(create_aligned_distance(d, a, k = k, gamma = gamma));


#def create_cost_spacing(c, length, normalized = True):
#  c_shape = c.get_shape().as_list();
#  c1 = tf.slice(c, [1,0], [-1,-1]);
#  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
#  d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
#  if normalized:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
#  else:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_aligned_distance(x,y,nx,ny, k = 2.0, gamma = 1.0):
  d = create_pair_wise_distances(x, y);
  a = create_pair_wise_dots(nx, ny);
  a = tf.scalar_mul(-0.5, tf.add(a, -1.0)); # [0,1] 0 = aligned
  return tf.reduce_mean(create_aligned_distance(d, a, k = k, gamma = gamma));


#def create_cost_spacing(c, length, normalized = True):
#  c_shape = c.get_shape().as_list();
#  c1 = tf.slice(c, [1,0], [-1,-1]);
#  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
#  d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
#  if normalized:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
#  else:
#    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);  # tf.mul was renamed tf.multiply in TF 1.0
    return tf.reduce_mean(distmin);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_distance_valid(self, c, s, v):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    mm = tf.reduce_max(v); #hack for batch size = 1
    ss = tf.slice(s, [0,0,0], [-1,mm,-1]);
    ss = tf.reshape(ss, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);  # tf.mul was renamed tf.multiply in TF 1.0
    return tf.reduce_mean(distmin);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_soft_min_distance(self, c, s):
    """Creates a soft-min distance of the centers to the points"""
    c_shape = c.get_shape().as_list();        
    s_shape = s.get_shape().as_list();

    #expand matrices
    cc = tf.reshape(c, [c_shape[0], c_shape[1], c_shape[2], 1]);    
    ss = tf.reshape(s, [s_shape[0], s_shape[1], s_shape[2], 1]);
    ss = tf.transpose(ss, perm = [0,3,2,1]);
    cc = tf.tile(cc, [1, 1, 1, s_shape[0]]);
    ss = tf.tile(ss, [1, c_shape[0], 1, 1]);

    #pairwise distances
    dist2 = tf.sqrt(tf.reduce_sum(tf.squared_difference(cc,ss), reduction_indices = 2));
    dist2 = tf.reduce_mean(dist2, reduction_indices=0); # hack: get rid of batches here 

    #softmin
    distmin = tf.reduce_sum(tf.multiply(tf.nn.softmax(tf.scalar_mul(tf.constant(-1.0,"float32"), dist2)), dist2), reduction_indices = 1);  # tf.mul was renamed tf.multiply in TF 1.0
    return tf.reduce_mean(distmin);
Project: deepsleepnet    Author: akaraspt    | project source | file source
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean. If True, use ``tf.reduce_mean`` to compute the per-sample loss; otherwise use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse
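A small usage sketch for the helper above (shapes are illustrative): with is_mean=True the per-sample losses are averaged over features before averaging over the batch; with the default, they are summed per sample first.

import tensorflow as tf

output = tf.placeholder(tf.float32, [None, 10])
target = tf.placeholder(tf.float32, [None, 10])
mse = mean_squared_error(output, target, is_mean=True)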
Project: tensorlayer-chinese    Author: shorxp    | project source | file source
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-square-error of two distributions.

    Parameters
    ----------
    output : 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, w, h] or [batch_size, w, h, c].
    target : 2D, 3D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 3:   # [batch_size, w, h]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2]))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse
Project: predictron    Author: brendanator    | project source | file source
def loss(preturns, lambda_preturn, labels):
  with tf.variable_scope('loss'):
    preturns_loss = tf.reduce_mean(
        tf.squared_difference(preturns, tf.expand_dims(labels, 1)))

    lambda_preturn_loss = tf.reduce_mean(
        tf.squared_difference(lambda_preturn, labels))

    consistency_loss = tf.reduce_mean(
        tf.squared_difference(
            preturns, tf.stop_gradient(tf.expand_dims(lambda_preturn, 1))))

    # tf.get_collection returns a list of tensors; sum it before adding it
    # to a tensor (assumes the 'losses' collection is non-empty)
    l2_loss = tf.add_n(tf.get_collection('losses'))

    total_loss = preturns_loss + lambda_preturn_loss + consistency_loss
    consistency_loss += l2_loss
    return total_loss, consistency_loss
Project: predictron    Author: brendanator    | project source | file source
def loss(preturns, lambda_preturn, labels):
  with tf.variable_scope('loss'):
    preturns_loss = tf.reduce_mean(
        tf.squared_difference(preturns, tf.expand_dims(labels, 1)))

    lambda_preturn_loss = tf.reduce_mean(
        tf.squared_difference(lambda_preturn, labels))

    consistency_loss = tf.reduce_mean(
        tf.squared_difference(
            preturns, tf.stop_gradient(tf.expand_dims(lambda_preturn, 1))))

    # tf.get_collection returns a list of tensors; sum it before adding it
    # to a tensor (assumes the 'losses' collection is non-empty)
    l2_loss = tf.add_n(tf.get_collection('losses'))

    total_loss = preturns_loss + lambda_preturn_loss + consistency_loss
    consistency_loss += l2_loss
    return total_loss, consistency_loss
Project: dcgan    Author: zsdonghao    | project source | file source
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean. If True, use ``tf.reduce_mean`` to compute the per-sample loss; otherwise use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse
Project: tensorflow    Author: luyishisi    | project source | file source
def testRandomFlipBoxes(self):
    boxes = self.createTestBoxes()

    # Case where the boxes are flipped.
    boxes_expected1 = self.expectedBoxesAfterMirroring()

    # Case where the boxes are not flipped.
    boxes_expected2 = boxes

    # After elementwise multiplication, the result should be all-zero since one
    # of them is all-zero.
    boxes_diff = tf.multiply(
        tf.squared_difference(boxes, boxes_expected1),
        tf.squared_difference(boxes, boxes_expected2))
    expected_result = tf.zeros_like(boxes_diff)

    with self.test_session() as sess:
      (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
      self.assertAllEqual(boxes_diff, expected_result)
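The assertion relies on the fact that (b - e1)**2 * (b - e2)**2 is all-zero exactly when every element of b matches the corresponding element of e1 or of e2. A NumPy illustration with assumed values:

import numpy as np

b  = np.array([0.0, 1.0])
e1 = np.array([0.0, 5.0])
e2 = np.array([3.0, 1.0])
prod = np.square(b - e1) * np.square(b - e2)
assert not prod.any()  # each element matches e1 or e2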
Project: comprehend    Author: Fenugreek    | project source | file source
def target_cost(self, inputs, targets, function=tf.squared_difference, **kwargs):
        """
        For mapping problems, the mean difference under `function` (default:
        squared difference) between hidden values and targets, i.e. the cost
        for the given input batch of samples under the current params.
        """
        hidden = self.get_hidden_values(inputs, **kwargs)
        return tf.reduce_mean(function(hidden, targets))
Project: comprehend    Author: Fenugreek    | project source | file source
def rms_loss(self, inputs, **kwargs):
        """
        Root-mean-squared difference between <inputs> and encoded-decoded output.
        """
        loss = tf.squared_difference(inputs, self.recode(inputs, **kwargs))
        return tf.reduce_mean(
                   tf.reduce_mean(loss, axis=list(range(1, self.input_dims))) ** .5)  # list() so axis is a concrete list under Python 3
Project: LocaliseNet    Author: najeeb97khan    | project source | file source
def _create_squared_loss(self, prev_layer, layer_name):

        with tf.variable_scope(layer_name) as scope:
            input_tensor, class_tensor, box_tensor = self.placeholder
            loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(box_tensor,prev_layer),reduction_indices=[1]))
            return loss
Project: LocaliseNet    Author: najeeb97khan    | project source | file source
def _create_squared_loss(self, prev_layer, layer_name):

        with tf.variable_scope(layer_name) as scope:
            input_tensor, class_tensor, box_tensor = self.placeholder
            loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(box_tensor,prev_layer),reduction_indices=[1]))
            return loss
Project: gait-recognition    Author: marian-margeta    | project source | file source
def _loss_mse(self):
        sq = tf.squared_difference(self.sigm_network, self.desired_heatmap)
        loss = self._adjust_loss(sq)

        return loss
Project: TensorFlow-ADGM    Author: dancsalo    | project source | file source
def gaussian_log_density(x, mu, sigma2):
        c = - 0.5 * math.log(2 * math.pi)
        density = c - tf.log(sigma2) / 2 - tf.squared_difference(x, mu) / (2 * sigma2)
        # return -tf.reduce_mean(tf.reduce_sum(density, axis=-1), axis=(1, 2))
        return density
Project: TensorFlow-ADGM    Author: dancsalo    | project source | file source
def gaussian_log_density(x, mu, sigma2):
        c = - 0.5 * math.log(2 * math.pi)
        density = c - tf.log(sigma2) / 2 - tf.squared_difference(x, mu) / (2 * sigma2)
        return -tf.reduce_mean(tf.reduce_sum(density, axis=-1), axis=(1, 2))
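The expression above is the element-wise log-density of a Normal(mu, sigma2) distribution. A quick NumPy cross-check, assuming SciPy is available (the values are illustrative):

import math

import numpy as np
from scipy.stats import norm

x, mu, sigma2 = 0.3, 0.0, 2.0
c = -0.5 * math.log(2 * math.pi)
density = c - math.log(sigma2) / 2 - (x - mu) ** 2 / (2 * sigma2)
assert np.isclose(density, norm.logpdf(x, loc=mu, scale=math.sqrt(sigma2)))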
Project: urnn    Author: Rand0mUsername    | project source | file source
def train_urnn_for_timestep_idx(self, idx):
        print('Initializing and training URNNs for one timestep...')

        # CM

        tf.reset_default_graph()
        self.cm_urnn=TFRNN(
            name="cm_urnn",
            num_in=1,
            num_hidden=128,
            num_out=10,
            num_target=1,
            single_output=False,
            rnn_cell=URNNCell,
            activation_hidden=None, # modReLU
            activation_out=tf.identity,
            optimizer=tf.train.RMSPropOptimizer(learning_rate=glob_learning_rate, decay=glob_decay),
            loss_function=tf.nn.sparse_softmax_cross_entropy_with_logits)
        self.train_network(self.cm_urnn, self.cm_data[idx], 
                           self.cm_batch_size, self.cm_epochs)

        # AP

        tf.reset_default_graph()
        self.ap_urnn=TFRNN(
            name="ap_urnn",
            num_in=2,
            num_hidden=512,
            num_out=1,
            num_target=1,
            single_output=True,
            rnn_cell=URNNCell,
            activation_hidden=None, # modReLU
            activation_out=tf.identity,
            optimizer=tf.train.RMSPropOptimizer(learning_rate=glob_learning_rate, decay=glob_decay),
            loss_function=tf.squared_difference)
        self.train_network(self.ap_urnn, self.ap_data[idx], 
                           self.ap_batch_size, self.ap_epochs)

        print('Init and training URNNs for one timestep done.')
Project: pythonml    Author: nicholastoddsmith    | project source | file source
def _GetLossFn(name):
    '''
    Helper function for selecting loss function
    name:   The name of the loss function
    return:     A handle for a loss function LF(YH, Y)
    '''
    return {'cos':   lambda YH, Y: tf.losses.cosine_distance(Y, YH),
            'hinge': lambda YH, Y: tf.losses.hinge_loss(Y, YH),
            'l1':    lambda YH, Y: tf.losses.absolute_difference(Y, YH),
            'l2':    lambda YH, Y: tf.squared_difference(Y, YH),
            'log':   lambda YH, Y: tf.losses.log_loss(Y, YH),
            'sgce':  lambda YH, Y: tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=YH),
            'smce':  lambda YH, Y: tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=YH)}.get(name)
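A hedged usage sketch for the selector above; YH and Y are illustrative placeholders. Note that 'l2' returns element-wise values (tf.squared_difference), while most of the other entries already reduce to a scalar, so reducing the result yourself keeps the behaviours comparable:

import tensorflow as tf

YH = tf.placeholder(tf.float32, [None, 1])
Y  = tf.placeholder(tf.float32, [None, 1])
loss_fn = _GetLossFn('l2')
loss = tf.reduce_mean(loss_fn(YH, Y))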
Project: CIKM2017    Author: heliarmk    | project source | file source
def regression_loss(reg_preds, reg_labels):
    rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(reg_labels, reg_preds)))
    tf.add_to_collection('losses', rmse)
    return rmse, tf.add_n(tf.get_collection("losses"), name="total_loss")
Project: CIKM2017    Author: heliarmk    | project source | file source
def regression_loss(reg_preds, reg_labels):
    rmse = tf.sqrt(tf.reduce_mean(tf.squared_difference(reg_labels, reg_preds)))
    tf.add_to_collection('losses', rmse)
    return tf.add_n(tf.get_collection('losses'), name="total_loss")
Project: Face_Point    Author: EllenSimith    | project source | file source
def loss(bbox_widths, preds, points, batch_size=100):
  """loss function based on paper, returns a tensor of batch_size.
  """
  diff = tf.squared_difference(preds, points)
  dist = []
  for i in range(5):
    dist.append(tf.reshape(tf.reduce_sum(diff[:,2*i:2*i+2], 1), [batch_size, 1]))
  dist = tf.reduce_sum(tf.sqrt(tf.concat(dist, 1)), 1)  # tf.concat takes the values first in TF >= 1.0
  error = tf.div(dist, bbox_widths)
  return error
Project: failures_of_DL    Author: shakedshammah    | project source | file source
def create_loss(self):
        self._Y_placeholder = tf.placeholder(tf.float32, shape=(None, self._n))
        self._loss = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(self._p, self._Y_placeholder), reduction_indices=[1]))
Project: failures_of_DL    Author: shakedshammah    | project source | file source
def create_loss(self):
        self._Y_placeholder = tf.placeholder(tf.float32, shape=(None, self._n))
        self._loss = tf.reduce_mean(
            tf.squared_difference(self._p, self._Y_placeholder))
Project: failures_of_DL    Author: shakedshammah    | project source | file source
def create_loss(self):
        h1 = affine("affine4", self._p, 100)
        h2 = affine("affine5", h1, 100)
        self._f = affine("affine6", h2, self._n, relu=False)
        self._loss = tf.reduce_mean(tf.squared_difference(self._f, self._f_placeholder))
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_cost(self, outputs):
        """Construct the cost function from the outputs of the last layer. This
        will be used through SGD to train the network.

        Parameters
        ----------
        outputs: tuple of tensors (n_out)
            a tuple of tensors containing the outputs from the last layer of the
            network

        Returns
        -------
        cost: a tensor computing the cost function of the network.
        reg: a tensor for computing regularization of the parameters.
            It should be None if no regularization is needed.
        """
        Zk, X, lmbd = outputs

        with tf.name_scope("reconstruction_zD"):
            rec = tf.matmul(Zk, tf.constant(self.D))

        with tf.name_scope("norm_2"):
            Er = tf.multiply(
                tf.constant(.5, dtype=tf.float32),
                tf.reduce_mean(tf.reduce_sum(tf.squared_difference(rec, X),
                                             reduction_indices=[1])))

        with tf.name_scope("norm_1"):
            l1 = lmbd * tf.reduce_mean(tf.reduce_sum(
                tf.abs(Zk), reduction_indices=[1]))

        return tf.add(Er, l1, name="cost")
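In NumPy terms, the cost above is the standard LASSO objective 0.5 * mean_i ||Z_i D - X_i||^2 + lmbd * mean_i ||Z_i||_1; a sketch with assumed shapes:

import numpy as np

Z = np.random.randn(4, 3)  # codes (illustrative)
D = np.random.randn(3, 5)  # dictionary
X = Z @ D                  # perfect reconstruction, so the l2 term is 0
lmbd = 0.1
cost = (0.5 * np.mean(np.sum((Z @ D - X) ** 2, axis=1))
        + lmbd * np.mean(np.sum(np.abs(Z), axis=1)))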
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_cost(self, outputs):
        """Construct the cost function from the outputs of the last layer. This
        will be used through SGD to train the network.

        Parameters
        ----------
        outputs: tuple of tensors (n_out)
            a tuple of tensors containing the outputs from the last layer of the
            network

        Returns
        -------
        cost: a tensor computing the cost function of the network.
        reg: a tensor for computing regularisation of the parameters.
            It should be 0 if no regularization is needed.
        """
        Zk, _, X, lmbd = outputs

        with tf.name_scope("reconstruction_zD"):
            rec = tf.matmul(Zk, tf.constant(self.D))

        with tf.name_scope("norm_2"):
            Er = .5*tf.reduce_mean(tf.reduce_sum(
                tf.squared_difference(rec, X), reduction_indices=[1]))

        with tf.name_scope("norm_1"):
            l1 = lmbd*tf.reduce_mean(tf.reduce_sum(
                tf.abs(Zk), reduction_indices=[1]))

        return tf.add(Er, l1, name="cost")
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_step(self, inputs):
        Z, X, lmbd = self.inputs
        K, p = self.D.shape
        L = self.L
        with tf.name_scope("step_ISTA"):
            self.S = tf.constant(np.eye(K, dtype=np.float32) - self.S0/L,
                                 shape=[K, K], name='S')
            self.We = tf.constant(self.D.T / L, shape=[p, K],
                                  dtype=tf.float32, name='We')
            B = tf.matmul(X, self.We, name='B')
            hk = tf.matmul(Z, self.S) + B
            step = soft_thresholding(hk, lmbd / L)
            dz = tf.reduce_mean(tf.reduce_sum(
                tf.squared_difference(step, Z), reduction_indices=[1]))
        return step, dz
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_cost(self, inputs):
        Z, X, lmbd = self.inputs
        with tf.name_scope("Cost"):
            rec = tf.matmul(Z, tf.constant(self.D))
            Er = tf.reduce_mean(
                tf.reduce_sum(tf.squared_difference(rec, X),
                              reduction_indices=[1]))/2
            cost = Er + lmbd * tf.reduce_mean(
                tf.reduce_sum(tf.abs(Z), reduction_indices=[1]))

        return cost
Project: AdaptiveOptim    Author: tomMoral    | project source | file source
def _get_cost(self, outputs):
        """Construct the cost function from the outputs of the last layer. This
        will be used through SGD to train the network.

        Parameters
        ----------
        outputs: tuple of tensors (n_out)
            a tuple of tensors containing the outputs from the last layer of the
            network

        Returns
        -------
        cost: a tensor computing the cost function of the network
        reg: a tensor for computing regularisation of the parameters.
            It should be 0 if no regularization is needed.
        """
        Zk, X, lmbd = outputs

        with tf.name_scope("reconstruction_zD"):
            rec = tf.matmul(Zk, tf.constant(self.D))

        with tf.name_scope("norm_2"):
            Er = .5 * tf.reduce_mean(tf.reduce_sum(
                tf.squared_difference(rec, X), reduction_indices=[1]))

        with tf.name_scope("norm_1"):
            l1 = lmbd * tf.reduce_mean(tf.reduce_sum(
                tf.abs(Zk), reduction_indices=[1]))

        cost = tf.add(Er, l1, name="cost")
        return cost
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_distance(self, l, r, d):
  # note: unlike the module-level variants below, this compares the
  # *squared* distance between l and r to d (no tf.sqrt before the comparison)
  dd = tf.reduce_sum(tf.squared_difference(l,r), reduction_indices=1);
  dd = tf.squared_difference(dd, d);
  return tf.reduce_mean(dd);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_spacing(self, c, length, normalized = True):
  c_shape = c.get_shape().as_list();
  c1 = tf.slice(c, [1,0], [-1,-1]);
  c2 = tf.slice(c, [0,0], [c_shape[0]-1,-1]);
  d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 1));
  if normalized:
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / (c_shape[0]-1), "float32")));
  else:
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
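A NumPy reading of the spacing cost above (the contour values are assumed): the distances between successive points are compared to the target segment length, length / (n - 1).

import numpy as np

c = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
d = np.sqrt(((c[1:] - c[:-1]) ** 2).sum(axis=1))  # [1., 1.]
length = 2.0
cost = np.mean((d - length / (len(c) - 1)) ** 2)  # 0.0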
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_distance(l, r, d):
  dd = tf.sqrt(tf.reduce_sum(tf.squared_difference(l,r), reduction_indices=1));
  dd = tf.squared_difference(dd, d);
  return tf.reduce_mean(dd);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_distance(l, r, d):
  dd = tf.sqrt(tf.reduce_sum(tf.squared_difference(l,r), reduction_indices=1));
  dd = tf.squared_difference(dd, d);
  return tf.reduce_mean(dd);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_spacing(t, length, normalized = True):
  d = tf.sqrt(tf.reduce_sum(tf.square(t), reduction_indices = 1));
  if normalized:
    s = t.get_shape().as_list();
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / s[0], "float32")));
  else:
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_distance(l, r, d):
  dd = tf.sqrt(tf.reduce_sum(tf.squared_difference(l,r), reduction_indices=1));
  dd = tf.squared_difference(dd, d);
  return tf.reduce_mean(dd);
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_spacing(t, length, normalized = True):
  d = tf.sqrt(tf.reduce_sum(tf.square(t), reduction_indices = 1));
  if normalized:
    s = t.get_shape().as_list();
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length / s[0], "float32")));
  else:
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(length, "float32")));
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_spacing(self, c):
    c1 = tf.slice(c, [0,1,0], [-1,-1,-1]);
    c2 = tf.slice(c, [0,0,0], [-1,self.npoints-1,-1]);
    d = tf.sqrt(tf.reduce_sum(tf.squared_difference(c1,c2), reduction_indices = 2));
    return tf.reduce_mean(tf.squared_difference(d, tf.constant(self.model.length / (self.npoints-1), "float32")));
Project: CElegansBehaviour    Author: ChristophKirst    | project source | file source
def create_cost_distance(self, l, r, d):
    # note: like the first method-level variant above, this compares the
    # *squared* distance between l and r to d (no tf.sqrt before the comparison)
    dd = tf.reduce_sum(tf.squared_difference(l,r), reduction_indices=1);
    dd = tf.squared_difference(dd, d);
    return tf.reduce_mean(dd);