Python tensorflow module: is_finite() code examples

The 13 code examples below were extracted from open-source Python projects and illustrate how to use tensorflow.is_finite().
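
All of the snippets assume the TensorFlow 1.x-era imports these projects use: import tensorflow as tf, import numpy as np, and (for the Keras examples) from keras import backend as K. tf.is_finite returns an element-wise boolean tensor that is True wherever the input is neither NaN nor infinite; in TensorFlow 2.x the same op lives at tf.math.is_finite. A minimal sketch:

import tensorflow as tf

x = tf.constant([1.0, float('nan'), float('inf'), -2.0])
mask = tf.is_finite(x)             # [True, False, False, True]
finite = tf.boolean_mask(x, mask)  # keeps only the finite entries: [1.0, -2.0]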

Project: Keras-FCN | Author: theduynguyen
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape so that the w and h dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped, axis=1)

    # classes absent from y_true give 0 / 0 = NaN; drop them before averaging
    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc, acc_mask)

    return K.mean(acc_masked)
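
A quick illustration of why the mask matters (toy values, not from the project): a class that never occurs in y_true produces 0 / 0 = NaN accuracy, and tf.is_finite plus tf.boolean_mask drops it before averaging:

acc = tf.constant([0.9, float('nan'), 0.5])  # NaN: class absent from y_true
mean_acc_value = K.mean(tf.boolean_mask(acc, tf.is_finite(acc)))  # (0.9 + 0.5) / 2 = 0.7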

Project: zhusuan | Author: thu-ml
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    # reject proposals whose acceptance rate or new log-probability is
    # non-finite by forcing the rate to zero
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
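
The rate implements the Metropolis rule min(1, exp(H_old - H_new)), so zeroing non-finite values means divergent proposals are always rejected. With toy scalars:

old_h = tf.constant(10.0)
new_h = tf.constant(12.5)
rate = tf.exp(tf.minimum(old_h - new_h, 0.0))  # exp(-2.5) ≈ 0.082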

Project: Keras-FCN | Author: theduynguyen
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape so that the w and h dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped, axis=1)

    # |A ∪ B| = |A| + |B| - |A ∩ B|; classes in neither mask give 0 / 0 = NaN
    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou, iou_mask)

    return K.mean(iou_masked)
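
The same masking idea with hypothetical per-class counts:

intersection = tf.constant([30.0, 0.0])
sums = tf.constant([80.0, 0.0])             # |A| + |B| per class
iou = intersection / (sums - intersection)  # [0.6, NaN]
mean_iou = K.mean(tf.boolean_mask(iou, tf.is_finite(iou)))  # 0.6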

Project: benchmarks | Author: tensorflow
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single tower, and the number of pairs
      equals the number of towers.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. has_nan_or_inf indicates whether any of the gradients
      contains a NaN or an Inf.
  """
  grads = [g for g, _ in grad_and_vars]
  grad = tf.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = tf.multiply(grad, 1.0 / len(grads))

  v = grad_and_vars[0][1]
  if check_inf_nan:
    # the per-tower grads share the variable's shape, so the Python list
    # converts to one stacked tensor for the element-wise finiteness check
    has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None
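
A hedged usage sketch (toy tensors; the shared variable is hypothetical):

v = tf.Variable([0.0, 0.0])
tower_grads = [(tf.constant([1.0, 2.0]), v),
               (tf.constant([3.0, float('nan')]), v)]
(avg_grad, var), bad = aggregate_single_gradient_using_copy(
    tower_grads, use_mean=True, check_inf_nan=True)
# avg_grad is [2.0, nan]; bad evaluates to True because one tower produced a NaN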

Project: srcnn-tensorflow | Author: tjvandal
def _loss(self, predictions):
    with tf.name_scope("loss"):
        # if training, the center of y is cropped; otherwise padding was applied
        # (integer division keeps the slice indices valid under Python 3)
        slice_amt = (np.sum(self.filter_sizes) - len(self.filter_sizes)) // 2
        slice_y = self.y_norm[:, slice_amt:-slice_amt, slice_amt:-slice_amt]
        _y = tf.cond(self.is_training, lambda: slice_y, lambda: self.y_norm)
        err = tf.square(predictions - _y)
        # masked MSE: zero out non-finite errors, then divide by the finite count
        err_filled = utils.fill_na(err, 0)
        finite_count = tf.reduce_sum(tf.cast(tf.is_finite(err), tf.float32))
        mse = tf.reduce_sum(err_filled) / finite_count
        return mse

Project: srcnn-tensorflow | Author: tjvandal
def fill_na(x, fillval=0):
    """Replace non-finite entries (NaN, +/-Inf) of x with fillval."""
    fill = tf.ones_like(x) * fillval
    return tf.where(tf.is_finite(x), x, fill)

Project: srcnn-tensorflow | Author: tjvandal
def nanmean(x, axis=None):
    x_filled = fill_na(x, 0)
    x_sum = tf.reduce_sum(x_filled, axis=axis)
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    return tf.div(x_sum, x_count)

Project: srcnn-tensorflow | Author: tjvandal
def nanvar(x, axis=None):
    x_count = tf.reduce_sum(tf.cast(tf.is_finite(x), tf.float32), axis=axis)
    x_mean = nanmean(x, axis=axis)
    # square first, then fill, so NaN positions contribute 0 rather than mean**2
    x_ss = tf.reduce_sum(fill_na((x - x_mean) ** 2, 0), axis=axis)
    return x_ss / x_count
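
Together, these three helpers mirror NumPy's nan-aware reductions and back the masked MSE in _loss above. A sketch of what they compute:

x = tf.constant([[1.0, float('nan')],
                 [3.0, 5.0]])
nanmean(x)     # (1 + 3 + 5) / 3 = 3.0; the NaN is ignored
nanvar(x)      # mean squared deviation over the 3 finite entries = 8 / 3
fill_na(x, 0)  # [[1., 0.], [3., 5.]]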

Project: monodepth360 | Author: srijanparmeshwar
def attenuate_rectilinear(self, K, disparity, position):
        S, T = lat_long_grid([tf.shape(disparity)[1], tf.shape(disparity)[2]])
        _, T_grids = self.expand_grids(S, -T, tf.shape(disparity)[0])
        if position == "top":
            attenuated_disparity = (1.0 / np.pi) * (tf.atan(disparity / K[1] + tf.tan(T_grids)) - T_grids)
        else:
            attenuated_disparity = (1.0 / np.pi) * (T_grids - tf.atan(tf.tan(T_grids) - disparity / K[1]))
        # zero out non-finite values, then clip to the valid disparity range
        attenuated_disparity = tf.where(tf.is_finite(attenuated_disparity),
                                        attenuated_disparity,
                                        tf.zeros_like(attenuated_disparity))
        return tf.clip_by_value(attenuated_disparity, 1e-6, 0.75)

Project: monodepth360 | Author: srijanparmeshwar
def attenuate_equirectangular(self, disparity, position):
        S, T = lat_long_grid([tf.shape(disparity)[1], tf.shape(disparity)[2]])
        _, T_grids = self.expand_grids(S, -T, tf.shape(disparity)[0])
        if position == "top":
            attenuated_disparity = (1.0 / np.pi) * (tf.atan(tf.tan(np.pi * disparity) + tf.tan(T_grids)) - T_grids)
        else:
            attenuated_disparity = (1.0 / np.pi) * (T_grids - tf.atan(tf.tan(T_grids) - tf.tan(np.pi * disparity)))
        # zero out non-finite values, then clip to the valid disparity range
        attenuated_disparity = tf.where(tf.is_finite(attenuated_disparity),
                                        attenuated_disparity,
                                        tf.zeros_like(attenuated_disparity))
        return tf.clip_by_value(attenuated_disparity, 1e-6, 0.75)
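
Near the poles (T approaching ±π/2) the tan/atan arithmetic overflows to Inf or NaN, which is what the tf.where guard catches in both attenuation functions. The pattern in isolation, with toy values:

x = tf.constant([0.4, float('inf'), float('nan')])
safe = tf.clip_by_value(tf.where(tf.is_finite(x), x, tf.zeros_like(x)), 1e-6, 0.75)
# [0.4, 1e-6, 1e-6]: non-finite entries become 0, then get clipped up to 1e-6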

Project: ternarynet | Author: czhu95
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad
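
tf.check_numerics raises an InvalidArgumentError as soon as the wrapped tensor contains a NaN or Inf, which is why it replaces the slow commented-out tf.Assert over tf.is_finite:

g = tf.constant([1.0, float('nan')])
g = tf.check_numerics(g, 'CheckGradient')  # raises InvalidArgumentError when evaluated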

Project: hand3d | Author: lmb-freiburg
def calc_center_bb(binary_class_mask):
    """ Returns the center of mass coordinates for the given binary_class_mask. """
    with tf.variable_scope('calc_center_bb'):
        binary_class_mask = tf.cast(binary_class_mask, tf.int32)
        binary_class_mask = tf.equal(binary_class_mask, 1)
        s = binary_class_mask.get_shape().as_list()
        if len(s) == 4:
            binary_class_mask = tf.squeeze(binary_class_mask, [3])

        s = binary_class_mask.get_shape().as_list()
        assert len(s) == 3, "binary_class_mask must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"

        # my meshgrid
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])

        bb_list = list()
        center_list = list()
        crop_size_list = list()
        for i in range(s[0]):
            X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
            Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)

            x_min = tf.reduce_min(X_masked)
            x_max = tf.reduce_max(X_masked)
            y_min = tf.reduce_min(Y_masked)
            y_max = tf.reduce_max(Y_masked)

            start = tf.stack([x_min, y_min])
            end = tf.stack([x_max, y_max])
            bb = tf.stack([start, end], 1)
            bb_list.append(bb)

            center_x = 0.5*(x_max + x_min)
            center_y = 0.5*(y_max + y_min)
            center = tf.stack([center_x, center_y], 0)

            # an empty mask leaves the min/max reductions non-finite, so fall
            # back to a fixed default center
            center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
                             lambda: tf.constant([160.0, 160.0]))
            center.set_shape([2])
            center_list.append(center)

            crop_size_x = x_max - x_min
            crop_size_y = y_max - y_min
            crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
            # same fallback for the crop size when the mask is empty
            crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
                                lambda: tf.constant([100.0]))
            crop_size.set_shape([1])
            crop_size_list.append(crop_size)

        bb = tf.stack(bb_list)
        center = tf.stack(center_list)
        crop_size = tf.stack(crop_size_list)

        return center, bb, crop_size
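
A hedged sketch of the empty-mask fallback (hypothetical all-background masks):

mask = tf.zeros([2, 320, 320], tf.int32)  # no foreground pixels at all
center, bb, crop_size = calc_center_bb(mask)
# the boolean masks are empty, so the min/max reductions are non-finite and
# the is_finite guards substitute center (160, 160) and crop size 100 per sample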

Project: ternarynet | Author: czhu95
def ImageSample(inputs):
    """
    Sample the template image, using the given coordinate, by bilinear interpolation.
    It mimics the same behavior described in:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    :param inputs: [template, mapping]. template of shape NHWC. mapping of
        shape NHW2, where each pair of the last dimension is a (y, x) real-value
        coordinate.
    :returns: a NHWC output tensor.
    """
    template, mapping = inputs
    assert template.get_shape().ndims == 4 and mapping.get_shape().ndims == 4

    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.cast(mapping, tf.int32)  # floor
    ucoor = lcoor + 1

    # has to cast to int32 and then cast back,
    # since tf.floor has gradient 1 w.r.t. its input
    # TODO bug fixed in #951
    diff = mapping - tf.cast(lcoor, tf.float32)
    neg_diff = 1.0 - diff  # shape: b x h2 x w2 x 2

    # split (y, x) coordinates and recombine them into the four corner points
    lcoory, lcoorx = tf.split(lcoor, 2, 3)
    ucoory, ucoorx = tf.split(ucoor, 2, 3)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(diff, 2, 3)
    neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)

    #prod = tf.reduce_prod(diff, 3, keep_dims=True)
    #diff = tf.Print(diff, [tf.is_finite(tf.reduce_sum(diff)), tf.shape(prod),
                          #tf.reduce_max(diff), diff],
                    #summarize=50)

    # bilinear interpolation: weight each corner sample by the fractional
    # distance to the opposite corner
    return tf.add_n([sample(template, lcoor) * neg_diffx * neg_diffy,
                     sample(template, ucoor) * diffx * diffy,
                     sample(template, lyux) * neg_diffy * diffx,
                     sample(template, uylx) * diffy * neg_diffx], name='sampled')
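
A hedged usage sketch (assuming the project's sample() gather helper is in scope; the tensors are toy data):

template = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # 1x4x4x1 ramp image
mapping = tf.constant([[[[1.5, 2.5]]]])              # one (y, x) query point
out = ImageSample([template, mapping])               # bilinear sample at (1.5, 2.5)
# the four neighbours (1,2), (1,3), (2,2), (2,3) average to 8.5 on the ramp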