Python tensorflow module: divide() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.divide().
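Before the project excerpts, here is a minimal self-contained sketch (values made up) of what tensorflow.divide() does: element-wise, broadcasting true division, written in TF 1.x graph style to match the examples below.

import tensorflow as tf

a = tf.constant([10.0, 20.0, 30.0])
b = tf.constant(4.0)
quotient = tf.divide(a, b)      # element-wise division -> [2.5, 5.0, 7.5]

with tf.Session() as sess:
    print(sess.run(quotient))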

Project: TFCommon    Author: MU94W    | Project source | File source
def sampled_softmax_loss(label, logit, projection, num_sampled):
    """
    Args:
        label:
        logit:          unscaled log probabilities
        projection:     (W, b)
        num_sampled:
    """
    local_label = tf.reshape(label, shape=(-1,1))
    local_logit = tf.reshape(logit, shape=(-1, logit.get_shape()[-1].value))
    local_Wt    = tf.transpose(projection[0], perm=(1,0))
    local_b     = projection[1]
    loss_sum    = tf.nn.sampled_softmax_loss(weights=local_Wt, biases=local_b,
                                             labels=local_label,
                                             inputs=local_logit,
                                             num_sampled=num_sampled,
                                             num_classes=local_Wt.get_shape()[0].value)
    loss = tf.divide(tf.reduce_sum(loss_sum), tf.cast(tf.size(local_label), dtype=tf.float32))
    return loss
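A possible usage sketch for the helper above; the placeholder names, shapes, and vocabulary size are assumptions for illustration, not part of the project.

# Hypothetical setup: 10,000-class vocabulary, 128-dimensional logits.
label = tf.placeholder(tf.int64, shape=(32, 20))           # batch x time, class ids
logit = tf.placeholder(tf.float32, shape=(32, 20, 128))    # unscaled log-probabilities
W = tf.get_variable('proj_W', shape=(128, 10000))          # projection weights
b = tf.get_variable('proj_b', shape=(10000,))              # projection bias
loss = sampled_softmax_loss(label, logit, (W, b), num_sampled=64)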
Project: RaspberryPi-Robot    Author: timestocome    | Project source | File source
def read_tensor_from_image_file(file_name='test.jpg', input_height=128, input_width=128,
                input_mean=0, input_std=255):


  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  image_reader = tf.image.decode_jpeg(file_reader, channels = 3, name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
Project: deeppavlov    Author: deepmipt    | Project source | File source
def masked_softmax(tensor, mask, expand=2, axis=1):
    """Masked soft-max using Lambda and merge-multiplication.

    Args:
        tensor: tensor containing scores
        mask: mask for the tensor, where 1 marks valid values at a position and 0 marks void/padded positions
        expand: axis along which to repeat mask
        axis: axis along which to compute soft-max

    Returns:
        masked soft-max values
    """

    mask = tf.expand_dims(mask, axis=expand)
    exponentiate = Lambda(lambda x: K.exp(x - K.max(x, axis=axis, keepdims=True)))(tensor)
    masked = tf.multiply(exponentiate, mask)
    div = tf.expand_dims(tf.reduce_sum(masked, axis=axis), axis=axis)
    predicted = tf.divide(masked, div)
    return predicted
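The same masked soft-max idea written in plain TensorFlow ops, as a rough sketch; tensor names and shapes are assumed for illustration.

# Scores of shape [batch, time, d], mask of shape [batch, time];
# the mask is expanded on axis 2 so it broadcasts, and the soft-max runs along axis 1.
scores = tf.placeholder(tf.float32, shape=(None, None, None))
mask = tf.placeholder(tf.float32, shape=(None, None))

mask_exp = tf.expand_dims(mask, axis=2)
exps = tf.exp(scores - tf.reduce_max(scores, axis=1, keep_dims=True))
masked = tf.multiply(exps, mask_exp)
probs = tf.divide(masked, tf.reduce_sum(masked, axis=1, keep_dims=True))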
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def yuv2rgb(yuv):
    """
    Convert YUV image into RGB https://en.wikipedia.org/wiki/YUV
    """
    yuv = tf.multiply(yuv, 255)
    yuv2rgb_filter = tf.constant([[[[1., 1., 1.], [0., -0.34413999, 1.77199996],
                                    [1.40199995, -0.71414, 0.]]]])
    yuv2rgb_bias = tf.constant([-179.45599365, 135.45983887, -226.81599426])

    yuv = tf.expand_dims(yuv, 0)
    temp = tf.nn.conv2d(yuv, yuv2rgb_filter, [1, 1, 1, 1], 'SAME')
    temp = tf.nn.bias_add(temp, yuv2rgb_bias)
    temp = tf.maximum(temp, tf.zeros(temp.get_shape(), dtype=tf.float32))
    temp = tf.minimum(temp,
                      tf.multiply(
                          tf.ones(temp.get_shape(), dtype=tf.float32), 255))
    temp = tf.divide(temp, 255)
    temp = tf.squeeze(temp, [0])
    return temp
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def loss(self, predictions, real_values):
        """Return the loss operation between predictions and real_values.
        Add L2 weight decay term if any.
        Args:
            predictions: predicted values
            real_values: real values
        Returns:
            Loss tensor of type float.
        """
        with tf.variable_scope('loss'):
            # 1/2n \sum^{n}_{i=1}{(x_i - x'_i)^2}
            mse = tf.divide(
                tf.reduce_mean(
                    tf.square(tf.subtract(predictions, real_values))),
                2.,
                name="mse")
            tf.add_to_collection(LOSSES, mse)

            # mse + weight_decay per layer
            error = tf.add_n(tf.get_collection(LOSSES), name='total_loss')

        return error
Project: keras-to-tensorflow    Author: bitbionic    | Project source | File source
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(file_reader, channels = 3,
                                       name='png_reader')
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                  name='gif_reader'))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  else:
    image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
                                        name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
Project: transform    Author: tensorflow    | Project source | File source
def mean(x, reduce_instance_dims=True, name=None):
  """Computes the mean of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the mean. If `x` is floating point, the mean will
    have the same type as `x`. If `x` is integral, the output is cast to float32
    for int8 and int16 and float64 for int32 and int64 (similar to the behavior
    of tf.truediv).
  """
  with tf.name_scope(name, 'mean'):
    # Note: Calling `sum` defined in this module, not the builtin.
    return tf.divide(
        sum(x, reduce_instance_dims), size(x, reduce_instance_dims))
Project: wide-deep-cnn    Author: DaniUPC    | Project source | File source
def optimized_loss(self, targets, logits):
        """ Function that computes the loss of a mixture density network
        in a way that it handles underflow and overflow and avoids unstable
        behaviors """
        # Obtain parameters
        mixings, sigma, mean = self.logits_to_params(logits)
        output_size = tf.cast(tf.shape(targets)[1], tf.float32)
        variance = tf.square(sigma)
        # Convert expressions into exponent-based terms
        mixings_exp = tf.log(mixings)
        # By properties of logarithm we can simplify the original expression
        # log(x/y) = log(x) - log(y), log(xy) = log(x) + log(y), log(1) = 0
        sqrt_exp = - output_size * (0.5 * tf.log(2*np.pi) + tf.log(sigma))
        gaussian_exp = -tf.divide(tf.square(targets - mean), 2 * variance)
        exponent = mixings_exp + sqrt_exp + gaussian_exp
        # Use optimized logsumexp function to control underflow/overflow
        return tf.reduce_logsumexp(exponent, axis=1)
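The reason for tf.reduce_logsumexp here is numerical stability; a quick check with made-up values shows the difference from the naive formulation.

x = tf.constant([[1000.0, 1001.0]])
naive = tf.log(tf.reduce_sum(tf.exp(x), axis=1))   # exp overflows -> [inf]
stable = tf.reduce_logsumexp(x, axis=1)            # factors out the max -> [~1001.313]

with tf.Session() as sess:
    print(sess.run([naive, stable]))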
Project: yaset    Author: jtourille    | Project source | File source
def loss_crf(self):
        """
        CRF based loss.
        :return: loss
        """

        # Reshaping seq_len tensor [seq_len, 1]
        seq_length_reshaped = tf.reshape(self.x_tokens_len, [tf.shape(self.x_tokens_len)[0], -1])

        # Computing loss by scanning mini-batch tensor
        out = tf.scan(self.loss_crf_scan, [self.prediction,
                                           seq_length_reshaped,
                                           self.y], back_prop=True, infer_shape=True, initializer=0.0)

        # Division by batch_size
        loss_crf = tf.divide(tf.reduce_sum(out), tf.cast(tf.shape(self.x_tokens)[0], dtype=tf.float32))

        return loss_crf
Project: tensorflow-for-poets-2    Author: googlecodelabs    | Project source | File source
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(file_reader, channels = 3,
                                       name='png_reader')
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                  name='gif_reader'))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  else:
    image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
                                        name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
Project: KBOPrediction    Author: riceluxs1t    | Project source | File source
def get_accuracy(self, x_test_home, x_test_away, y_test, keep_prop=1.0):
        """
        The predictions from x_test_home and x_test_away are mapped to 1 or 0 depending on whether
        the home team wins. They are then compared with y_test, which is the ground truth.
        """
        predict = tf.map_fn(
            lambda x: x[0] > x[1],
            self.sess.run(
                self.hypothesis, 
                feed_dict={
                self.X_home: x_test_home, 
                self.X_away: x_test_away, 
                self.Y: y_test, 
                self.keep_prob: keep_prop}
            ), 
            dtype=bool)

        real = tf.map_fn(
            lambda x: x[0] > x[1],
            y_test,
            dtype=bool)

        return self.sess.run(
            tf.divide(
                tf.reduce_sum(tf.cast(tf.equal(predict, real), dtype=tf.int32)), len(y_test)))
Project: TikZ    Author: ellisk42    | Project source | File source
def decodesIntoAccuracy(self, labels, perSymbol = True):
        # has the dimensions None x L
        accuracyMatrix = tf.equal(self.hardOutputs, labels)

        # zero out anything past the labeled length
        accuracyMatrix = tf.logical_and(accuracyMatrix,
                                        tf.sequence_mask(self.lengthPlaceholder, maxlen = self.maximumLength))

        # Sum across all of the time steps to get the total number of correct predictions in each batch entry
        accuracyVector = tf.reduce_sum(tf.cast(accuracyMatrix,tf.int32),axis = 1)
        if perSymbol:
            # Now normalize it by the sequence length and take the average
            accuracyVector = tf.divide(tf.cast(accuracyVector,tf.float32),
                                       tf.cast(self.lengthPlaceholder,tf.float32))
        if not perSymbol:
            # accuracy is measured per sequence
            accuracyVector = tf.cast(tf.equal(accuracyVector,self.lengthPlaceholder),tf.float32)
        return tf.reduce_mean(accuracyVector)
Project: pydatalab    Author: googledatalab    | Project source | File source
def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
  """Scale a tensor to scale_min to scale_max.

  Args:
    tensor: input tensor. Should be a numerical tensor.
    range_min: min expected value for this feature/tensor.
    range_max: max expected value.
    scale_min: new expected min value.
    scale_max: new expected max value.

  Returns:
    scaled tensor.
  """
  if range_min == range_max:
    return tensor

  float_tensor = tf.to_float(tensor)
  scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) *
                             tf.constant(float(scale_max - scale_min))),
                            tf.constant(float(range_max - range_min)))
  shifted_tensor = scaled_tensor + tf.constant(float(scale_min))

  return shifted_tensor
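A small worked example with made-up numbers, assuming the helper above: rescaling pixel values from [0, 255] onto [-1, 1].

pixels = tf.constant([0, 128, 255])
scaled = _scale_tensor(pixels, range_min=0, range_max=255, scale_min=-1, scale_max=1)
# (x - 0) * (1 - (-1)) / (255 - 0) + (-1)  ->  [-1.0, ~0.004, 1.0]

with tf.Session() as sess:
    print(sess.run(scaled))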
Project: transfer_learning_sound_classification    Author: lukeinator42    | Project source | File source
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(file_reader, channels = 3,
                                       name='png_reader')
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                  name='gif_reader'))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  else:
    image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
                                        name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result
Project: triplet-reid    Author: VisualComputingInstitute    | Project source | File source
def endpoints(image, is_training):
    if image.get_shape().ndims != 4:
        raise ValueError('Input must be of size [batch, height, width, 3]')

    image = tf.divide(image, 255.0)

    with tf.contrib.slim.arg_scope(mobilenet_v1_arg_scope(batch_norm_decay=0.9, weight_decay=0.0)):
        _, endpoints = mobilenet_v1(image, num_classes=1001, is_training=is_training)

    endpoints['model_output'] = endpoints['global_pool'] = tf.reduce_mean(
        endpoints['Conv2d_13_pointwise'], [1, 2], name='global_pool', keep_dims=False)

    return endpoints, 'MobilenetV1'


# This is copied and modified from mobilenet_v1.py.
Project: TensorFlow-World    Author: astorfi    | Project source | File source
def loss_fn(W,b,x_data,y_target):
    logits = tf.subtract(tf.matmul(x_data, W),b)
    norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W),W)),2)
    classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param,classification_loss), tf.multiply(FLAGS.Reg_param,norm_term))
    return total_loss
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def GumbelSoftmaxLogDensity(y, p, tau):
    # EPS = tf.constant(1e-10)
    k = tf.shape(y)[-1]
    k = tf.cast(k, tf.float32)
    # y = y + EPS
    # y = tf.divide(y, tf.reduce_sum(y, -1, keep_dims=True))
    y = normalize_to_unit_sum(y)
    sum_p_over_y = tf.reduce_sum(tf.divide(p, tf.pow(y, tau)), -1)
    logp = tf.lgamma(k)
    logp = logp + (k - 1) * tf.log(tau)
    logp = logp - k * tf.log(sum_p_over_y)
    logp = logp + sum_p_over_y
    return logp
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def normalize_to_unit_sum(x, EPS=1e-10):
    ''' Along the last dim '''
    EPS = tf.constant(EPS, dtype=tf.float32)
    x = x + EPS
    x_sum = tf.reduce_sum(x, -1, keep_dims=True)
    x = tf.divide(x, x_sum)
    return x
Project: tfutils    Author: neuroailab    | Project source | File source
def accumulate_gradients(self, minibatch_grads, num_minibatches=1):
        """Accumulate gradients for `num_minibatches` minibatches."""
        if self.var_list is None:
            self.var_list = tf.trainable_variables()

        if self.grads_and_vars is None:
            self.grads_and_vars = [(
                tf.Variable(tf.zeros_like(var.initialized_value()),
                            dtype=tf.float32,
                            trainable=False),
                var) for var in self.var_list]

        # Add 1/num_minibatches * minibatch_grads to current gradients.
        def _add_op(gv_tmp, mgv_tmp):
            return tf.add(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        def _set_op(gv_tmp, mgv_tmp):
            return tf.assign(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        #grads = [(gv[0].assign_add(tf.divide(mgv[0], num_minibatches)), gv[1])
        #         for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        #grads = tf.cond(tf.less(self.mini_flag[0], 0.5), fn1 = lambda: _add_op(), fn2 = lambda: _set_op())
        grads = [tf.cond(tf.less(self.mini_flag[0], 0.5), fn1 = lambda: _set_op(gv[0], mgv[0]), fn2 = lambda: _add_op(gv[0], mgv[0]))
                 for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        with tf.control_dependencies(grads):
            self.mini_flag = tf.assign(self.mini_flag, tf.constant([1], dtype = tf.float32))
        grads = [(only_grad, gv[1])
                 for (gv, only_grad) in zip(self.grads_and_vars, grads)]
        return self.mini_flag, grads
Project: TFCommon    Author: MU94W    | Project source | File source
def __call__(self, query):

        with tf.variable_scope('attention'):
            # Check if the memory's batch_size is consistent with query's batch_size

            query_units = query.get_shape()[-1].value

            Wa = tf.get_variable(name='Wa', shape=(query_units, self.attention_units))
            Va = tf.get_variable(name='Va', shape=(self.attention_units,),
                                 initializer=tf.constant_initializer(0.0) if self.mode == 0 else tf.constant_initializer(1e-2))
            b  = tf.get_variable(name='b',  shape=(self.attention_units,),
                                 initializer=tf.constant_initializer(0.0) if self.mode == 0 else tf.constant_initializer(0.5))

            # 1st. compute query_feat (query's representation in the attention module)
            query_feat = tf.reshape(tf.matmul(query, Wa), (-1, 1, 1, self.attention_units))

            # 2nd. compute the energy for all time steps in encoder (element-wise mul then reduce)
            e = tf.reduce_sum(Va * tf.nn.tanh(self.hidden_feats + query_feat + b), axis=(2,3))

            # 3rd. compute the score
            if self.mask is not None:
                exp_e = tf.exp(e)
                exp_e = exp_e * self.mask
                alpha = tf.divide(exp_e, tf.reduce_sum(exp_e, axis=-1, keep_dims=True))
            else:
                alpha = tf.nn.softmax(e)

            # 4th. get the weighted context from memory (element-wise mul then reduce)
            context = tf.reshape(alpha, (tf.shape(query)[0], self.enc_length, 1, 1)) * self.memory
            context = tf.reduce_sum(context, axis=(1, 2))

            return context, alpha
Project: TFCommon    Author: MU94W    | Project source | File source
def perplexity(label, logit):
    words = tf.cast(tf.size(label), tf.float32)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logit)
    cross_entropy = tf.divide(tf.reduce_sum(cross_entropy), words)
    perplex = tf.pow(2.0, cross_entropy)
    return perplex
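A minimal usage sketch; the placeholder shapes and vocabulary size are assumptions.

label = tf.placeholder(tf.int32, shape=(None,))            # token ids
logit = tf.placeholder(tf.float32, shape=(None, 10000))    # unscaled scores per token
ppl = perplexity(label, logit)                             # scalar perplexity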
Project: RaspberryPi-Robot    Author: timestocome    | Project source | File source
def capture_image(self):

      image = np.empty((self.width, self.height, 3), dtype=np.uint8)
      self.camera.capture(image, 'rgb')

      float_caster = tf.cast(image, tf.float32)
      dims_expander = tf.expand_dims(float_caster, 0)
      resized = tf.image.resize_bilinear(dims_expander, [self.height, self.width])

      normalized = tf.divide(tf.subtract(resized, [self.input_mean]), [self.input_std])
      sess = tf.Session()
      result = sess.run(normalized)

      return result
Project: bp-mll-tensorflow    Author: vanHavel    | Project source | File source
def bp_mll_loss(y_true, y_pred):

    # get true and false labels
    shape = tf.shape(y_true)
    y_i = tf.equal(y_true, tf.ones(shape))
    y_i_bar = tf.not_equal(y_true, tf.ones(shape))

    # get indices to check
    truth_matrix = tf.to_float(pairwise_and(y_i, y_i_bar))

    # calculate all exp'd differences
    sub_matrix = pairwise_sub(y_pred, y_pred)
    exp_matrix = tf.exp(tf.negative(sub_matrix))

    # check which differences to consider and sum them
    sparse_matrix = tf.multiply(exp_matrix, truth_matrix)
    sums = tf.reduce_sum(sparse_matrix, axis=[1,2])

    # get normalizing terms and apply them
    y_i_sizes = tf.reduce_sum(tf.to_float(y_i), axis=1)
    y_i_bar_sizes = tf.reduce_sum(tf.to_float(y_i_bar), axis=1)
    normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)
    results = tf.divide(sums, normalizers)

    # sum over samples
    return tf.reduce_sum(results)

# compute pairwise differences between elements of the tensors a and b
Project: tf-crnn    Author: solivr    | Project source | File source
def preprocess_image_for_prediction(fixed_height: int=32, min_width: int=8):
    """
    Input function to use when exporting the model for making predictions (see estimator.export_savedmodel)
    :param fixed_height: height of the input image after resizing
    :param min_width: minimum width of image after resizing
    :return:
    """

    def serving_input_fn():
        # define placeholder for input image
        image = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])

        shape = tf.shape(image)
        # Assert shape is h x w x c with c = 1

        ratio = tf.divide(shape[1], shape[0])
        increment = CONST.DIMENSION_REDUCTION_W_POOLING
        new_width = tf.cast(tf.round((ratio * fixed_height) / increment) * increment, tf.int32)

        resized_image = tf.cond(new_width < tf.constant(min_width, dtype=tf.int32),
                                true_fn=lambda: tf.image.resize_images(image, size=(fixed_height, min_width)),
                                false_fn=lambda: tf.image.resize_images(image, size=(fixed_height, new_width))
                                )

        # Features to serve
        features = {'images': resized_image[None],  # cast to 1 x h x w x c
                    'images_widths': new_width[None]  # cast to tensor
                    }

        # Inputs received
        receiver_inputs = {'images': image}

        return tf.estimator.export.ServingInputReceiver(features, receiver_inputs)

    return serving_input_fn
Project: TFExperiments    Author: gnperdue    | Project source | File source
def compute_categorical_loss_and_accuracy(logits, targets):
    """return total loss, reg loss (subset of total), and accuracy"""
    with tf.variable_scope('loss'):
        regularization_losses = sum(
            tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES
            )
        )
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=targets
            ),
            axis=0,
            name='loss'
        ) + regularization_losses
        preds = tf.nn.softmax(logits, name='preds')
        correct_preds = tf.equal(
            tf.argmax(preds, 1), tf.argmax(targets, 1),
            name='correct_preds'
        )
        accuracy = tf.divide(
            tf.reduce_sum(tf.cast(correct_preds, tf.float32)),
            tf.cast(tf.shape(targets)[0], tf.float32),
            name='accuracy'
        )
    return loss, regularization_losses, accuracy
Project: WaterGAN    Author: kskin    | Project source | File source
def read_depth(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape
    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp,(self.output_height,self.output_width),mode='F')
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth,np.divide(depth,depth.max()))

    return depth
Project: WaterGAN    Author: kskin    | Project source | File source
def read_depth_small(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape

    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp,(self.output_height,self.output_width),mode='F')
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth,np.divide(depth,depth.max()))

    return depth
Project: WaterGAN    Author: kskin    | Project source | File source
def read_depth_sample(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape
    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp,(self.sh,self.sw),mode='F')
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth,np.divide(depth,depth.max()))

    return depth
Project: WaterGAN    Author: kskin    | Project source | File source
def read_depth(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape
    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp,(self.output_height,self.output_width),mode='F')
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth,np.divide(depth,depth.max()))

    return depth
Project: WaterGAN    Author: kskin    | Project source | File source
def read_depth_small(self, filename):
    depth_mat = sio.loadmat(filename)
    depthtmp=depth_mat["depth"]
    ds = depthtmp.shape

    if self.is_crop:
      depth = scipy.misc.imresize(depthtmp,(self.output_height,self.output_width),mode='F')
    depth = np.array(depth).astype(np.float32)
    depth = np.multiply(self.max_depth,np.divide(depth,depth.max()))

    return depth
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def _mse(self, input_x, output_x):
        # 1/2n \sum^{n}_{i=1}{(x_i - x'_i)^2}
        return tf.divide(
            tf.reduce_mean(tf.square(tf.subtract(input_x, output_x))),
            2.,
            name="mse")
Project: tflearn    Author: tflearn    | Project source | File source
def r2_op(predictions, targets):
    """ r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = r2_op(y_pred, y_true)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.

    Returns:
        `Float`. The standard error.

    """
    with tf.name_scope('StandardError'):
        a = tf.reduce_sum(tf.square(predictions))
        b = tf.reduce_sum(tf.square(targets))
        return tf.divide(a, b)

Project: tflearn    Author: tflearn    | Project source | File source
def weighted_r2_op(predictions, targets, inputs):
    """ weighted_r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = weighted_r2_op(y_pred, y_true, input_data)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.
        inputs: `Tensor`.

    Returns:
        `Float`. The standard error.

    """
    with tf.name_scope('WeightedStandardError'):
        if hasattr(inputs, '__len__'):
            inputs = tf.add_n(inputs)
        if inputs.get_shape().as_list() != targets.get_shape().as_list():
            raise Exception("Weighted R2 metric requires Inputs and Targets to "
                            "have same shape.")
        a = tf.reduce_sum(tf.square(predictions - inputs))
        b = tf.reduce_sum(tf.square(targets - inputs))
        return tf.divide(a, b)

Project: DenseHumanBodyCorrespondences    Author: halimacc    | Project source | File source
def _loss(self, labels, logits):
        float_labels = tf.cast(labels, tf.float32)

        epsilon = tf.constant(value=1e-4)
        softmax = tf.nn.softmax(logits) + epsilon
        cross_entropy = -tf.reduce_sum(float_labels * tf.log(softmax), reduction_indices=[-1])
        cross_entropy_mean = tf.reduce_mean(cross_entropy)

        total_pixels = tf.constant(value=conf.width * conf.height, dtype=tf.float32)
        valid_pixels = tf.reduce_sum(float_labels)
        loss = tf.divide(tf.multiply(cross_entropy_mean, total_pixels), valid_pixels)

        return loss
Project: spoofnet-tensorflow    Author: yomna-safaa    | Project source | File source
def get_eval_ops(logits, labels, one_hot=False, scope='', calc_accuracy=True):
    """Evaluate the quality of the logits at predicting the label.
      Args:
        logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size], with values in the
          range [0, NUM_CLASSES).
      Returns:
        A scalar int32 tensor with the number of examples (out of batch_size)
        that were predicted correctly.
      """
    print('Evaluation Ops..')
    with tf.name_scope(scope):
        # For a classifier model, we can use the in_top_k Op.
        # It returns a bool tensor with shape [batch_size] that is true for
        # the examples where the label is in the top k (here k=1)
        # of all logits for that example.
        # labels = tf.cast(labels, tf.int64)
        if one_hot:
            labels = tf.argmax(labels, 1)
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        num_correct = tf.reduce_sum(tf.cast(top_1_op, tf.float32))

        if calc_accuracy:
            acc_percent = tf.divide(num_correct, labels.shape[0].value)
        else:
            acc_percent = tf.constant(0.0)

        # =============
        y_const = tf.constant(-1, dtype=labels.dtype)
        y_greater = tf.greater(labels, y_const)
        n_all = tf.reduce_sum(tf.cast(y_greater, tf.float32))

        return top_1_op, acc_percent * 100.0, num_correct, n_all, labels


########################################################################
Project: tensorflow-forward-ad    Author: renmengye    | Project source | File source
def Div_FwGrad(op, dx, dy, _op_table=None, _grad_table=None):
  x = op.inputs[0]
  y = op.inputs[1]
  if dx is None and dy is None:
    return None
  elif dx is not None and dy is None:
    return tf.divide(dx, y)
  elif dy is not None and dx is None:
    return -tf.divide(x * dy, y**2)
  else:
    return tf.divide(y * dx - x * dy, y**2)
Project: hourglasstensorlfow    Author: wbenbihi    | Project source | File source
def _compute_err(self, u, v):
        """ Given 2 tensors compute the euclidean distance (L2) between maxima locations
        Args:
            u       : 2D - Tensor (Height x Width : 64x64 )
            v       : 2D - Tensor (Height x Width : 64x64 )
        Returns:
            (float) : Distance (in [0,1])
        """
        u_x,u_y = self._argmax(u)
        v_x,v_y = self._argmax(v)
        return tf.divide(tf.sqrt(tf.square(tf.to_float(u_x - v_x)) + tf.square(tf.to_float(u_y - v_y))), tf.to_float(91))
Project: LearningFromHumanPreferences    Author: ZachisGit    | Project source | File source
def init_tf(self):
        tf.reset_default_graph()
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.initializer = tf.truncated_normal_initializer(stddev=0.3)

            self.input_o0 = tf.placeholder(shape=[None,self.obs_size],dtype=tf.float32)
            self.input_o1 = tf.placeholder(shape=[None,self.obs_size],dtype=tf.float32)
            self.preference_distribution = tf.placeholder(shape=[2],dtype=tf.float32)
            self.model_o0 = self.create_model(self.input_o0)
            self.model_o1 = self.create_model(self.input_o1,reuse=True)
            self.batch_sizes = tf.placeholder(shape=[2],dtype=tf.float32)
            #'''
            self.model_o0_sum = tf.exp(tf.divide(tf.reduce_sum(self.model_o0),self.batch_sizes[0]))
            self.model_o1_sum = tf.exp(tf.divide(tf.reduce_sum(self.model_o1),self.batch_sizes[1]))
            #self.model_o1_sum = tf.exp(tf.reduce_sum(self.model_o1))
            self.p_o0_o1 = tf.divide(self.model_o0_sum,tf.add(self.model_o0_sum,self.model_o1_sum))
            self.p_o1_o0 = tf.divide(self.model_o1_sum,tf.add(self.model_o1_sum,self.model_o0_sum))
            self.loss = -tf.add(tf.multiply(self.preference_distribution[0],tf.log(self.p_o0_o1)), \
                    tf.multiply(self.preference_distribution[1],tf.log(self.p_o1_o0)))

            '''
            self.model_o0_sum = tf.exp(tf.reduce_sum(self.model_o0))
            self.model_o1_sum = tf.exp(tf.reduce_sum(self.model_o1))
            self.p_o0_o1 = tf.add(1e-5,tf.divide(self.model_o0_sum,tf.add(1e-5,tf.add(self.model_o0_sum,self.model_o1_sum))))
            self.p_o1_o0 = tf.add(1e-5,tf.divide(self.model_o1_sum,tf.add(1e-5,tf.add(self.model_o1_sum,self.model_o0_sum))))
            self.loss = tf.add(1e-5,-tf.add(tf.multiply(self.preference_distribution[0],tf.log(self.p_o0_o1)), \
                    tf.multiply(self.preference_distribution[1],tf.log(self.p_o1_o0))))
            #'''
            self.train_step = tf.train.AdamOptimizer(learning_rate=self.LEARNING_RATE).minimize(self.loss)
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())

            self.saver = tf.train.Saver(tf.global_variables())
            self.checkpoint_path = "./human_critic/hc_model/"+self.datetime_str+"/hc_model_"+self.datetime_str+".ckpt"
Project: chicksexer    Author: kensk8er    | Project source | File source
def _mean_pool(self, rnn_outputs, batch_size, max_char_len, max_word_len, char_lens):
        """
        Perform mean-pooling after the character-RNN layer.

        :param rnn_outputs: hidden states of all the time steps after the character-RNN layer
        :return: mean of the hidden states over every time step
        """
        # perform mean pooling over characters
        rnn_outputs = tf.reduce_mean(rnn_outputs, reduction_indices=1)

        # In order to avoid the 0 padding affecting the mean, multiply by `n / m` where `n` is
        # `max_char_len` and `m` is `char_lens`
        rnn_outputs = tf.multiply(rnn_outputs, tf.cast(max_char_len, tf.float32))  # multiply by `n`

        # swap the dimensions in order to divide by an appropriate value for each time step
        rnn_outputs = tf.transpose(rnn_outputs)

        rnn_outputs = tf.divide(rnn_outputs, tf.cast(char_lens, tf.float32))  # divide by `m`
        rnn_outputs = tf.transpose(rnn_outputs)  # shape back to the original shape

        # batch and word-len dimensions were merged before running character-RNN so shape it back
        rnn_outputs = tf.reshape(rnn_outputs, [batch_size, max_word_len, self._char_rnn_size * 2])

        # there are NaN due to padded words (with char_len=0) so convert those NaN to 0
        rnn_outputs = tf.where(tf.is_nan(rnn_outputs), tf.zeros_like(rnn_outputs), rnn_outputs)

        return rnn_outputs
Project: Y8M    Author: mpekalski    | Project source | File source
def SampleSpacedFrames(model_input, num_frames, num_samples, header=2):
  batch_size = tf.shape(model_input)[0]

  sequence_float = tf.divide(tf.range(header, num_samples + header, dtype=tf.float32),  
                             tf.cast(header * 2 + num_samples, tf.float32) )
  sequence_mat = tf.reshape(sequence_float, [1, -1])

  frame_index = tf.cast( tf.matmul( tf.cast(num_frames, tf.float32), sequence_mat), tf.int32)
  frame_index = tf.minimum(frame_index, tf.cast(num_frames - 1, tf.int32))

  batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: Y8M    Author: mpekalski    | Project source | File source
def SampleSpacedFrames(model_input, num_frames, num_samples, header=2):
  batch_size = tf.shape(model_input)[0]

  sequence_float = tf.divide(tf.range(header, num_samples + header, dtype=tf.float32),  
                             tf.cast(header * 2 + num_samples, tf.float32) )
  sequence_mat = tf.reshape(sequence_float, [1, -1])

  frame_index = tf.cast( tf.matmul( tf.cast(num_frames, tf.float32), sequence_mat), tf.int32)
  frame_index = tf.minimum(frame_index, tf.cast(num_frames - 1, tf.int32))

  batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: tensorflow-layer-library    Author: bioinf-jku    | Project source | File source
def max_norm_all_tensors(tensor_list, clip: bool = True):
    """Normalization of list of tensors by maximum of tensors"""
    maxima = [tf.reduce_max(tf.abs(tensor)) for tensor in tensor_list]
    maxima = tf.stack(maxima)
    if clip:
        maximum = tf.reduce_max(maxima) + 1e-16
    else:
        maximum = tf.reduce_max(maxima)
    return [tf.divide(tensor, maximum) for tensor in tensor_list]
Project: wide-deep-cnn    Author: DaniUPC    | Project source | File source
def _gaussian_pdf(self, x, mixings, sigma, mean):
        """ Wrapper for Gaussian PDF """
        variance = tf.square(sigma)
        output_size = tf.cast(tf.shape(mean)[1], tf.float32)
        # Left: 1/sqrt(pi * 2 * variance) [N, K]
        left = tf.reciprocal(tf.pow(2*np.pi, output_size/2.0) *
                             tf.pow(sigma, output_size))
        # Exponent: e^[-(x-mu)^2/(2var)]. [N, K]
        right = tf.exp(-tf.divide(tf.square(x - mean), 2 * variance))
        return tf.multiply(left, right)
Project: yaset    Author: jtourille    | Project source | File source
def _get_weight(in_size, out_size):
        """
        Weight matrix initialization following Xavier initialization
        :param in_size: input size
        :param out_size: output size
        :return: weight matrix
        """

        min_val = -np.divide(np.sqrt(6), np.sqrt(np.add(in_size, out_size)))
        max_val = np.divide(np.sqrt(6), np.sqrt(np.add(in_size, out_size)))

        return tf.random_uniform([in_size, out_size], minval=min_val, maxval=max_val)
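The bound used above is the standard Xavier/Glorot limit sqrt(6 / (in + out)); a hedged usage sketch with made-up layer sizes, calling the helper directly.

W_init = _get_weight(300, 100)                  # uniform in [-sqrt(6/400), +sqrt(6/400)]
W = tf.Variable(W_init, name='hidden_weights')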
Project: tefla    Author: openAGI    | Project source | File source
def _process_towers_loss(self, dataset, opt, model, is_training=False, reuse=True, is_classification=True, loss_type='cross_entropy'):
        tower_loss = []
        predictions = []
        validation_metric = []
        validation_metric_tmp = [[] for _, _ in self.validation_metrics_def]
        for i in xrange(self.cnf.get('num_gpus', 1)):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (self.cnf.get('TOWER_NAME', 'tower'), i)) as scope:
                    images, labels = inputs(dataset, self.cnf['tfrecords_im_size'], self.cnf.get(
                        'crop_size'), batch_size=self.cnf['batch_size_test'], num_preprocess_threads=32, num_readers=8, image_preprocessing=self.preprocessor.preprocess_image)
                    labels = self._adjust_ground_truth(labels)
                    loss_pred = self._tower_loss(
                        scope, model, images, labels, is_training=is_training, reuse=reuse, is_classification=is_classification, loss_type=loss_type)
                    tower_loss.append(loss_pred['loss'])
                    predictions.append(loss_pred['predictions'])
                    if self.loss_type == 'kappa_log':
                        labels = tf.argmax(labels, axis=1)
                    for i, (_, metric_function) in enumerate(self.validation_metrics_def):
                        metric_score = metric_function(
                            labels, tf.argmax(loss_pred['predictions'], 1))
                        validation_metric_tmp[i].append(metric_score)
        predictions = tf.convert_to_tensor(predictions)
        predictions = tf.reshape(predictions, [-1, self.num_classes])
        for i, (_, _) in enumerate(self.validation_metrics_def):
            validation_metric.append(
                tf.divide(sum(validation_metric_tmp[i]), self.cnf.get('num_gpus')))
        return sum(tower_loss), predictions, validation_metric
Project: tefla    Author: openAGI    | Project source | File source
def _compute_weights(self, labels):
        log.debug('Computing weights from batch labels')
        labels = tf.cast(labels, dtype=tf.float32)
        lshape = tf.cast(tf.shape(labels), dtype=tf.float32)
        weights = tf.divide(tf.reduce_sum(
            labels, axis=0, keep_dims=True), lshape[0])
        return tf.tile(weights, [tf.shape(labels)[0], 1])
Project: tefla    Author: openAGI    | Project source | File source
def __call__(self, img, is_training):
        if self.channel_wise:
            img_mean = img.mean(axis=(1, 2))
            img_std = img.std(axis=(1, 2))
            np.subtract(img, img_mean.reshape(3, 1, 1), out=img)
            np.divide(img, (img_std + 1e-4).reshape(3, 1, 1), out=img)
        else:
            img_mean = img.mean()
            img_std = img.std()
            np.subtract(img, img_mean, out=img)
            np.divide(img, img_std + 1e-4, out=img)

        np.clip(img, -self.clip, self.clip, out=img)
        return img
Project: tefla    Author: openAGI    | Project source | File source
def __call__(self, img, is_training):
        np.subtract(img, self.mean[:, np.newaxis, np.newaxis], out=img)
        np.divide(img, self.std[:, np.newaxis, np.newaxis], out=img)
        if is_training:
            img = self.augment_color(img, sigma=self.sigma)
        else:
            # tta (test time augmentation)
            img = self.augment_color(img, color_vec=self.color_vec)
        return img
Project: learning-rank-public    Author: andreweskeclarke    | Project source | File source
def ranknet(x, relevance_labels, learning_rate, n_hidden, build_vars_fn, score_with_batchnorm_update_fn, score_fn):
    n_out = 1
    sigma = 1
    n_data = tf.shape(x)[0]

    print('USING SIGMA = %f' % sigma)
    params = build_vars_fn()
    predicted_scores, bn_params = score_with_batchnorm_update_fn(x, params)
    S_ij = tf.maximum(tf.minimum(1., relevance_labels - tf.transpose(relevance_labels)), -1.)
    real_scores = (1/2)*(1+S_ij)
    pairwise_predicted_scores = predicted_scores - tf.transpose(predicted_scores)
    lambdas = sigma*(1/2)*(1-S_ij) - sigma*tf.divide(1, (1 + tf.exp(sigma*pairwise_predicted_scores)))

    non_updating_predicted_scores = score_fn(x, bn_params, params)
    non_updating_S_ij = tf.maximum(tf.minimum(1., relevance_labels - tf.transpose(relevance_labels)), -1.)
    non_updating_real_scores = (1/2)*(1+non_updating_S_ij)
    non_updating_pairwise_predicted_scores = non_updating_predicted_scores - tf.transpose(non_updating_predicted_scores)
    non_updating_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=non_updating_pairwise_predicted_scores, labels=non_updating_real_scores))

    def get_derivative(W_k):
        dsi_dWk = tf.map_fn(lambda x_i: tf.squeeze(tf.gradients(score_fn(tf.expand_dims(x_i, 0), bn_params, params), [W_k])[0]), x)
        dsi_dWk_minus_dsj_dWk = tf.expand_dims(dsi_dWk, 1) - tf.expand_dims(dsi_dWk, 0)
        desired_lambdas_shape = tf.concat([tf.shape(lambdas), tf.ones([tf.rank(dsi_dWk_minus_dsj_dWk) - tf.rank(lambdas)], dtype=tf.int32)], axis=0)
        return tf.reduce_mean(tf.reshape(lambdas, desired_lambdas_shape)*dsi_dWk_minus_dsj_dWk, axis=[0,1])

    flat_params = [Wk for pk in params for Wk in pk]
    grads = [get_derivative(Wk) for Wk in flat_params]
    adam = tf.train.AdamOptimizer(learning_rate=learning_rate)
    adam_op = adam.apply_gradients([(tf.reshape(grad, tf.shape(param)), param) for grad, param in zip(grads, flat_params)])

    def optimizer(sess, feed_dict):
        sess.run(adam_op, feed_dict=feed_dict)

    def get_score(sess, feed_dict):
        return sess.run(non_updating_predicted_scores, feed_dict=feed_dict)

    return non_updating_cost, optimizer, get_score
Project: sText2Image    Author: elliottwu    | Project source | File source
def kl_divergence(p, q):
    tf.assert_rank(p,2)
    tf.assert_rank(q,2)

    p_shape = tf.shape(p)
    q_shape = tf.shape(q)
    tf.assert_equal(p_shape, q_shape)

    # normalize sum to 1
    p_ = tf.divide(p, tf.tile(tf.expand_dims(tf.reduce_sum(p,axis=1), 1), [1,p_shape[1]]))
    q_ = tf.divide(q, tf.tile(tf.expand_dims(tf.reduce_sum(q,axis=1), 1), [1,p_shape[1]]))

    return tf.reduce_sum(tf.multiply(p_, tf.log(tf.divide(p_, q_))), axis=1)
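A small usage sketch with made-up rows; each row is normalized to sum to 1 inside the function before KL(p || q) is taken per row.

p = tf.constant([[1.0, 1.0, 2.0]])
q = tf.constant([[1.0, 1.0, 1.0]])
kl = kl_divergence(p, q)             # KL([0.25, 0.25, 0.5] || [1/3, 1/3, 1/3]) -> ~0.059

with tf.Session() as sess:
    print(sess.run(kl))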