Python tensorflow module: Assert() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.Assert().
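
Before diving into the examples, a note on semantics: tf.Assert(condition, data) returns an op that, when executed, prints the tensors in data and raises InvalidArgumentError if the scalar boolean condition is false. Because it is a side-effect op, it only runs if something in the graph depends on it, which is why nearly every example below wraps downstream ops in tf.control_dependencies. A minimal sketch (TF 1.x graph mode; names and values are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None])
# The assertion fires only when an op that depends on it is evaluated.
assert_op = tf.Assert(tf.reduce_all(x >= 0.0), [x], name='check_non_negative')
with tf.control_dependencies([assert_op]):
    y = tf.identity(x)

with tf.Session() as sess:
    sess.run(y, feed_dict={x: [1.0, 2.0]})   # passes
    # sess.run(y, feed_dict={x: [-1.0]})     # would raise InvalidArgumentError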

Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def value_transition(self, curr_state, next_symbols, batch_size):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        with tf.name_scope('grammar_transition'):
            adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens, next_symbols + (first_value_token - self.num_control_tokens), next_symbols)

            assert1 = tf.Assert(tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)), [curr_state, next_symbols])
            with tf.control_dependencies([assert1]):
                transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
            assert transitions.get_shape()[1:] == (self.output_size,)

            indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
            next_state = tf.gather_nd(transitions, indices)

            assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [curr_state, adjusted_next_symbols, next_state])
            with tf.control_dependencies([assert2]):
                return tf.identity(next_state)
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def gauss_KL(mu1, logstd1, mu2, logstd2):
    """ Returns KL divergence among two multivariate Gaussians, component-wise.

    It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
    It is not necessary to know the number of actions because reduce_sum will
    sum over this to get the `d` constant offset. The part consisting of the
    trace in the formula is blended with the mean difference squared due to the
    common "denominator" of var2_na.  This forumula generalizes for an arbitrary
    number of actions.  I think mu2 and logstd2 should represent the policy
    before the update.

    Returns the KL divergence for each of the n components in the minibatch,
    then we do a reduce_mean outside this.
    """
    var1_na = tf.exp(2.*logstd1)
    var2_na = tf.exp(2.*logstd2)
    tmp_matrix = 2.*(logstd2 - logstd1) + (var1_na + tf.square(mu1-mu2))/var2_na - 1
    kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1]) # Don't forget the 1/2 !!
    assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n]) 
    with tf.control_dependencies([assert_op]):
        kl_n = tf.identity(kl_n)
    return kl_n
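
For reference, the quantity assembled in tmp_matrix is the standard closed-form KL divergence between diagonal Gaussians, summed over the a action dimensions:

D_{KL}\big(\mathcal{N}(\mu_1,\sigma_1^2)\,\|\,\mathcal{N}(\mu_2,\sigma_2^2)\big) = \sum_i \Big[ \log\tfrac{\sigma_{2,i}}{\sigma_{1,i}} + \tfrac{\sigma_{1,i}^2 + (\mu_{1,i}-\mu_{2,i})^2}{2\sigma_{2,i}^2} - \tfrac{1}{2} \Big]

Since \sigma^2 = \exp(2\,\mathrm{logstd}), the bracketed term equals 0.5 * tmp_matrix entry-wise, which is exactly what the code sums.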
Project: YellowFin    Author: JianGoForIt    | Project source | File source
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_nan(self._grad_var) ), [self._grad_var,]),
    #   tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg) ), [self._dist_to_opt_avg,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._h_min) ), [self._h_min,]), 
    #   tf.Assert(tf.logical_not(tf.is_inf(self._grad_var) ), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x
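
The comment block compresses a standard derivation. Vieta's substitution y = w - p/(3w) turns the depressed cubic y^3 + p y = q (here q = -p) into a quadratic in w^3:

w^3 - \frac{p^3}{27 w^3} = q \;\Longrightarrow\; (w^3)^2 - q\,w^3 - \frac{p^3}{27} = 0 \;\Longrightarrow\; w^3 = \frac{-p - \sqrt{p^2 + \frac{4}{27}p^3}}{2}

(taking the negative root, since q = -p), which is exactly the w3 computed above; the EPS terms only guard against division by zero when the gradient vanishes.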
Project: antgo    Author: jianzfb    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies([size_assertion], tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: Master-R-CNN    Author: Mark110    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: predictron    Author: brendanator    | Project source | File source
def preturn_network(rewards, discounts, values):
  # First reward must be zero, first discount must be one
  first_reward = tf.Assert(
      tf.reduce_all(tf.equal(rewards[:, 0, :], 0.0)), [rewards[:, 0, :]])
  first_discount = tf.Assert(
      tf.reduce_all(tf.equal(discounts[:, 0, :], 1.0)), [discounts[:, 0, :]])

  with tf.control_dependencies([first_reward, first_discount]):
    with tf.variable_scope('preturn'):
      accum_value_discounts = tf.cumprod(discounts, axis=1, exclusive=False)
      accum_reward_discounts = tf.cumprod(discounts, axis=1, exclusive=True)
      discounted_values = values * accum_value_discounts
      discounted_rewards = rewards * accum_reward_discounts
      cumulative_rewards = tf.cumsum(discounted_rewards, axis=1)
      preturns = cumulative_rewards + discounted_values

      util.activation_summary(preturns)
      return preturns
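
The exclusive flag is doing the real work here: values are discounted by every discount up to and including their own step, while rewards are discounted only by the steps before them. A small numeric illustration of tf.cumprod (discount values are made up):

import tensorflow as tf

d = tf.constant([1.0, 0.9, 0.8])
inclusive = tf.cumprod(d, exclusive=False)  # [1.0, 0.9, 0.72]
exclusive = tf.cumprod(d, exclusive=True)   # [1.0, 1.0, 0.9]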
Project: predictron    Author: brendanator    | Project source | File source
def preturn_network(rewards, discounts, values):
  # First reward must be zero, first discount must be one
  first_reward = tf.Assert(
      tf.reduce_all(tf.equal(rewards[:, 0, :], 0.0)), [rewards[:, 0, :]])
  first_discount = tf.Assert(
      tf.reduce_all(tf.equal(discounts[:, 0, :], 1.0)), [discounts[:, 0, :]])

  with tf.control_dependencies([first_reward, first_discount]):
    with tf.variable_scope('preturn'):
      accum_value_discounts = tf.cumprod(discounts, axis=1, exclusive=False)
      accum_reward_discounts = tf.cumprod(discounts, axis=1, exclusive=True)
      discounted_values = values * accum_value_discounts
      discounted_rewards = rewards * accum_reward_discounts
      cumulative_rewards = tf.cumsum(discounted_rewards, axis=1)
      preturns = cumulative_rewards + discounted_values

      util.activation_summary(preturns)
      return preturns
Project: FastMaskRCNN    Author: CharlesShang    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: tensorflow-pspnet    Author: pudae    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: ActionVLAD    Author: rohitgirdhar    | Project source | File source
def decoderFn(num_samples=1):
  class decoder_func(slim.data_decoder.DataDecoder):
    @staticmethod
    def list_items():
      return ['image', 'label']


    @staticmethod
    def decode(data, items):
      image_buffer = _decode_from_string(data)
      # if num_samples == 1:
        # tf.Assert(tf.shape(image_buffer)[0] == 1, image_buffer)
        # image_buffer = image_buffer[0]
      # else:
      image_buffer = tf.pack(image_buffer)
      return image_buffer
  return decoder_func
Project: TFMaskRCNN    Author: hillox    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.stack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def get_function_init_state(self, function_tokens):
        next_state = tf.gather(self.function_states, function_tokens - (self.num_begin_tokens + self.num_control_tokens))
        assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [function_tokens])
        with tf.control_dependencies([assert2]):
            return tf.identity(next_state)
Project: fold    Author: tensorflow    | Project source | File source
def _tf_nth(fns, n):
  """Runs only the nth element of fns, where n is a scalar integer tensor."""
  cases = [(tf.equal(tf.constant(i, n.dtype), n), fn)
           for i, fn in enumerate(fns)]
  final_pred, final_fn = cases.pop()
  def default():
    with tf.control_dependencies([
        tf.Assert(final_pred, [n, len(fns)], name='nth_index_error')]):
      return final_fn()
  if len(fns) == 1: return default()
  return tf.case(cases, default)
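
A hedged usage sketch (the index tensor and branch functions are illustrative): _tf_nth evaluates only the selected branch, and the Assert in the default case turns an out-of-range index into a runtime error instead of silently running the last branch.

import tensorflow as tf

n = tf.placeholder(tf.int32, [])
fns = [lambda: tf.constant(10), lambda: tf.constant(20), lambda: tf.constant(30)]
nth = _tf_nth(fns, n)

with tf.Session() as sess:
    print(sess.run(nth, feed_dict={n: 1}))   # 20
    # feed_dict={n: 5} would trip the 'nth_index_error' assertion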
Project: Master-R-CNN    Author: Mark110    | Project source | File source
def crop(images, boxes, batch_inds, stride = 1, pooled_height = 7, pooled_width = 7, scope='ROIAlign'):
  """Cropping areas of features into fixed size
  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds: 

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    #
    boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2]) # to (x, y)
    xs = boxes[:, 0] 
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
        return  tf.image.crop_and_resize(images, boxes, batch_inds,
                                         [pooled_height, pooled_width],
                                         method='bilinear',
                                         name='Crop')
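
The box re-packing above exists because tf.image.crop_and_resize expects boxes as normalized (y1, x1, y2, x2) coordinates in [0, 1], plus a batch-index vector mapping each box to an image. A minimal standalone sketch of that convention (shapes and values are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, None, None, 3])
boxes = tf.constant([[0.0, 0.0, 0.5, 0.5]])   # top-left quadrant of image 0
box_ind = tf.constant([0], dtype=tf.int32)
crops = tf.image.crop_and_resize(images, boxes, box_ind, crop_size=[7, 7])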
Project: Classification_Nets    Author: BobLiu20    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
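
A hedged usage sketch built on the function above, drawing random offsets the same way crop_to_fixed_size does later on this page (the placeholder and crop sizes are illustrative):

import tensorflow as tf

image = tf.placeholder(tf.float32, [None, None, 3])
crop_height, crop_width = 224, 224
shape = tf.shape(image)
offset_height = tf.random_uniform([], maxval=shape[0] - crop_height + 1, dtype=tf.int32)
offset_width = tf.random_uniform([], maxval=shape[1] - crop_width + 1, dtype=tf.int32)
cropped = _crop(image, offset_height, offset_width, crop_height, crop_width)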
Project: tefla    Author: openAGI    | Project source | File source
def _get_cubic_root(self):
        """Get the cubic root."""
        # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
        # where x = sqrt(mu).
        # We substitute x, which is sqrt(mu), with x = y + 1.
        # It gives y^3 + py = q
        # where p = (D^2 h_min^2)/(2*C) and q = -p.
        # We use Vieta's substitution to compute the root.
        # There is only one real solution y (which is in [0, 1] ).
        # http://mathworld.wolfram.com/VietasSubstitution.html
        assert_array = [
            tf.Assert(
                tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
                [self._dist_to_opt_avg, ]),
            tf.Assert(
                tf.logical_not(tf.is_nan(self._h_min)),
                [self._h_min, ]),
            tf.Assert(
                tf.logical_not(tf.is_nan(self._grad_var)),
                [self._grad_var, ]),
            tf.Assert(
                tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
                [self._dist_to_opt_avg, ]),
            tf.Assert(
                tf.logical_not(tf.is_inf(self._h_min)),
                [self._h_min, ]),
            tf.Assert(
                tf.logical_not(tf.is_inf(self._grad_var)),
                [self._grad_var, ])
        ]
        with tf.control_dependencies(assert_array):
            p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
            w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
            w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
            y = w - p / 3.0 / w
            x = y + 1
        return x
Project: tefla    Author: openAGI    | Project source | File source
def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
        source_samples: a tensor of shape [num_samples, num_features]
        target_samples: a tensor of shape [num_samples, num_features]
        weight: a scalar weight for the loss.
        scope: optional name scope for summary tags.

    Returns:
        a scalar tensor representing the correlation loss value.
    """
    with tf.name_scope(name):
        source_samples -= tf.reduce_mean(source_samples, 0)
        target_samples -= tf.reduce_mean(target_samples, 0)
        source_samples = tf.nn.l2_normalize(source_samples, 1)
        target_samples = tf.nn.l2_normalize(target_samples, 1)
        source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
        target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
        corr_loss = tf.reduce_mean(
            tf.square(source_cov - target_cov)) * weight

    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
        tag = 'Correlation Loss'
        barrier = tf.no_op(tag)

    return corr_loss
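
In matrix form, with row-centered, L2-normalized feature matrices H_s and H_t of width d, the loss above is the mean squared difference of the two d x d second-moment matrices, i.e. a scaled squared Frobenius norm:

\mathcal{L}_{corr} = \frac{w}{d^2}\,\big\| H_s^\top H_s - H_t^\top H_t \big\|_F^2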
Project: tefla    Author: openAGI    | Project source | File source
def mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):
    """Adds a similarity loss term, the MMD between two representations.

    This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
    different Gaussian kernels.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the MMD loss.
      scope: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the MMD loss value.
    """
    with tf.name_scope(name):
        sigmas = [
            1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
            1e3, 1e4, 1e5, 1e6
        ]
        gaussian_kernel = partial(
            util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))

        loss_value = maximum_mean_discrepancy(
            source_samples, target_samples, kernel=gaussian_kernel)
        loss_value = tf.maximum(1e-4, loss_value) * weight
    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
        tag = 'MMD Loss'
        barrier = tf.no_op(tag)
    return loss_value
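
The quantity being estimated is the squared kernel MMD between the two sample sets, here with a multi-scale kernel formed by summing Gaussian kernels over the listed sigmas:

\mathrm{MMD}^2(S, T) = \mathbb{E}_{x,x' \sim S}\,k(x,x') \;-\; 2\,\mathbb{E}_{x \sim S,\, y \sim T}\,k(x,y) \;+\; \mathbb{E}_{y,y' \sim T}\,k(y,y')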
Project: tefla    Author: openAGI    | Project source | File source
def dann_loss(source_samples, target_samples, weight, name='dann_loss'):
    """Adds the domain adversarial (DANN) loss

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the loss.
      scope: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the correlation loss value.
    """
    with tf.variable_scope(name):
        batch_size = tf.shape(source_samples)[0]
        samples = tf.concat(values=[source_samples, target_samples], axis=0)
        samples = flatten(samples)

        domain_selection_mask = tf.concat(
            values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)

        grl = gradient_reverse(samples)
        grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

        grl = fc(grl, 100, True, None, activation=relu, name='fc1')
        logits = fc(grl, 1, True, None, activation=None, name='fc2')

        domain_predictions = tf.sigmoid(logits)

    domain_loss = tf.losses.log_loss(
        domain_selection_mask, domain_predictions, weights=weight)

    domain_accuracy = util.accuracy_tf(domain_selection_mask,
                                       tf.round(domain_predictions))

    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/domain_loss'
        barrier = tf.no_op(tag_loss)

    return domain_loss
Project: tefla    Author: openAGI    | Project source | File source
def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
    """Adds the difference loss between the private and shared representations.

    Args:
      private_samples: a tensor of shape [num_samples, num_features].
      shared_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the incoherence loss.
      name: the name of the tf summary.
    """
    with tf.name_scope(name):
        private_samples -= tf.reduce_mean(private_samples, 0)
        shared_samples -= tf.reduce_mean(shared_samples, 0)

        private_samples = tf.nn.l2_normalize(private_samples, 1)
        shared_samples = tf.nn.l2_normalize(shared_samples, 1)

        correlation_matrix = tf.matmul(
            private_samples, shared_samples, transpose_a=True)

        cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
        cost = tf.where(cost > 0, cost, 0, name='value')

    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
        barrier = tf.no_op(name)
    return cost
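
Equivalently, with centered, L2-normalized private and shared feature matrices P and S, the cost is the mean of the squared entries of their cross-correlation matrix P^T S, pushing the two subspaces toward orthogonality:

\mathcal{L}_{diff} = w \cdot \mathrm{mean}\big( (P^\top S) \circ (P^\top S) \big)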
Project: tefla    Author: openAGI    | Project source | File source
def _crop(self, image, offset_height, offset_width, crop_height, crop_width):
        """Crops the given image using the provided offsets and sizes.
        Note that the method doesn't assume we know the input image size but it does
        assume we know the input image rank.

        Args:
          image: an image of shape [height, width, channels].
          offset_height: a scalar tensor indicating the height offset.
          offset_width: a scalar tensor indicating the width offset.
          crop_height: the height of the cropped image.
          crop_width: the width of the cropped image.

        Returns:
          the cropped (and resized) image.

        Raises:
          InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
        """
        original_shape = tf.shape(image)

        rank_assertion = tf.Assert(
            tf.equal(tf.rank(image), 3),
            ['Rank of image must be equal to 3.'])
        with tf.control_dependencies([rank_assertion]):
            cropped_shape = tf.stack(
                [crop_height, crop_width, original_shape[2]])

        size_assertion = tf.Assert(
            tf.logical_and(
                tf.greater_equal(original_shape[0], crop_height),
                tf.greater_equal(original_shape[1], crop_width)),
            ['Crop size greater than the image size.'])

        offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
        with tf.control_dependencies([size_assertion]):
            image = tf.slice(image, offsets, cropped_shape)
        return tf.reshape(image, cropped_shape)
Project: tefla    Author: openAGI    | Project source | File source
def _crop(self, image, offset_height, offset_width, crop_height, crop_width):
        """Crops the given image using the provided offsets and sizes.
        Note that the method doesn't assume we know the input image size but it does
        assume we know the input image rank.

        Args:
          image: an image of shape [height, width, channels].
          offset_height: a scalar tensor indicating the height offset.
          offset_width: a scalar tensor indicating the width offset.
          crop_height: the height of the cropped image.
          crop_width: the width of the cropped image.

        Returns:
          the cropped (and resized) image.

        Raises:
          InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
        """
        original_shape = tf.shape(image)

        rank_assertion = tf.Assert(
            tf.equal(tf.rank(image), 3),
            ['Rank of image must be equal to 3.'])
        with tf.control_dependencies([rank_assertion]):
            cropped_shape = tf.stack(
                [crop_height, crop_width, original_shape[2]])

        size_assertion = tf.Assert(
            tf.logical_and(
                tf.greater_equal(original_shape[0], crop_height),
                tf.greater_equal(original_shape[1], crop_width)),
            ['Crop size greater than the image size.'])

        offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
        with tf.control_dependencies([size_assertion]):
            image = tf.slice(image, offsets, cropped_shape)
        return tf.reshape(image, cropped_shape)
Project: fcn    Author: ilovin    | Project source | File source
def crop_to_fixed_size(img_tensor,annotation_tensor,output_shape):
    """
    the output_shape must be smaller than the input_shape
    :param img_tensor: [w,h,depth]
    :param annotation_tensor: [w,h,1]
    :param output_shape:
    :return: (output_shape,output_shape,3) (output_shape,output_shape,1)
    """
    original_shape = tf.shape(img_tensor)
    crop_width, crop_height = output_shape[0],output_shape[1]
    image_width, image_height = original_shape[0],original_shape[1]
    img_cropped_shape = tf.stack([output_shape[0], output_shape[1], 3])
    annotate_cropped_shape = tf.stack([output_shape[0], output_shape[1], 1])

    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_width),
            tf.greater_equal(original_shape[1], crop_height)),
        ['Crop size greater than the image size.'])
    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])

    offset_height = tf.random_uniform(
        [], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform(
        [], maxval=max_offset_width, dtype=tf.int32)
    offsets = tf.to_int32(tf.stack([offset_width, offset_height, 0]))

    annotation_tensor = tf.to_int32(annotation_tensor)
    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(img_tensor, offsets, img_cropped_shape)
        annotate = tf.slice(annotation_tensor,offsets,annotate_cropped_shape)
    return tf.reshape(image, img_cropped_shape),tf.reshape(annotate,annotate_cropped_shape)
Project: fcn    Author: ilovin    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: segmentation-models    Author: desimone    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

      Note that the method doesn't assume we know the input image size but it does
      assume we know the input image rank.

      Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

      Returns:
        the cropped (and resized) image.

      Raises:
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
          less than the crop size.
      """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion], tf.pack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion], tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: convolutional-vqa    Author: paarthneekhara    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: LargeNumberClasses    Author: maodayezheng    | Project source | File source
def loss(self, x, h, q=None):
        """
            Calculate the estimated loss of the importance-sampling approximation

            @Param x(NxD): The target word or batch
            @Param h(NxD): This is usually the output of neural network
            @Param q(N): The Weight of target
        """
        # K
        weights = self.get_sample_weights()
        if weights is None:
            raise ValueError("sample weights must be set")
        # Note: this Assert op has no consumers, so it is built but never
        # actually executed; it would need a tf.control_dependencies block.
        tf.Assert(tf.equal(weights, 0.0), [weights])
        # KxD
        samples = self.get_samples()
        if samples is None:
            raise ValueError("samples must be set")
        # N
        target_scores = tf.reduce_sum(x * h, 1)
        self.target_exp_ = tf.exp(target_scores)
        # N x K
        samples_scores = tf.matmul(h, samples, transpose_b=True)
        # N
        exp_weight = tf.exp(samples_scores) / weights
        self.Z_ = tf.reduce_sum(tf.check_numerics(exp_weight, "each Z "), 1)

        # The loss of each element in target
        # N
        element_loss = target_scores - tf.log(q) - tf.log(self.Z_)
        loss = tf.reduce_mean(element_loss)
        return -loss
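
In formulas, with target scores s_n = x_n \cdot h_n, sampled scores s_{nk} = h_n \cdot v_k, and sample weights w_k, the code computes the importance-sampled partition estimate and the resulting negative log-likelihood:

\hat{Z}_n = \sum_k \frac{\exp(s_{nk})}{w_k}, \qquad \mathcal{L} = -\frac{1}{N} \sum_n \big( s_n - \log q_n - \log \hat{Z}_n \big)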
Project: predictron    Author: brendanator    | Project source | File source
def lambda_preturn_network(preturns, lambdas):
  # Final lambda must be zero
  final_lambda = tf.Assert(
      tf.reduce_all(tf.equal(lambdas[:, -1, :], 0.0)), [lambdas[:, -1, :]])

  with tf.control_dependencies([final_lambda]):
    with tf.variable_scope('lambda_preturn'):
      accum_lambda = tf.cumprod(lambdas, axis=1, exclusive=True)
      lambda_bar = (1 - lambdas) * accum_lambda  # This should always sum to 1
      lambda_preturn = tf.reduce_sum(
          lambda_bar * preturns, reduction_indices=1)

      util.activation_summary(lambda_preturn)
      return lambda_preturn
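
A quick check of the "should always sum to 1" comment, for three steps with the asserted final lambda λ_2 = 0: accum_lambda = [1, λ_0, λ_0 λ_1], so lambda_bar = [1 - λ_0, λ_0 (1 - λ_1), λ_0 λ_1], and the terms telescope: (1 - λ_0) + λ_0 (1 - λ_1) + λ_0 λ_1 = 1.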
Project: predictron    Author: brendanator    | Project source | File source
def lambda_preturn_network(preturns, lambdas):
  # Final lambda must be zero
  final_lambda = tf.Assert(
      tf.reduce_all(tf.equal(lambdas[:, -1, :], 0.0)), [lambdas[:, -1, :]])

  with tf.control_dependencies([final_lambda]):
    with tf.variable_scope('lambda_preturn'):
      accum_lambda = tf.cumprod(lambdas, axis=1, exclusive=True)
      lambda_bar = (1 - lambdas) * accum_lambda  # This should always sum to 1
      lambda_preturn = tf.reduce_sum(
          lambda_bar * preturns, reduction_indices=1)

      util.activation_summary(lambda_preturn)
      return lambda_preturn
Project: Neural-Turing-Machine    Author: yeoedward    | Project source | File source
def deserialize(self, state):
    # Deserialize state from previous timestep.
    M0 = tf.slice(
      state,
      [0, 0],
      [-1, self.mem_nrows * self.mem_ncols],
    )
    M0 = tf.reshape(M0, [-1, self.mem_nrows, self.mem_ncols])

    state_idx = self.mem_nrows * self.mem_ncols

    # Deserialize read weights from previous time step.
    read_w0s = []
    for i in xrange(self.n_heads):
      # Number of weights == Rows of memory matrix
      w0 = tf.slice(state, [0, state_idx], [-1, self.mem_nrows])
      read_w0s.append(w0)
      state_idx += self.mem_nrows

    # Do the same for write heads.
    write_w0s = []
    for _ in xrange(self.n_heads):
      w0 = tf.slice(state, [0, state_idx], [-1, self.mem_nrows])
      write_w0s.append(w0)
      state_idx += self.mem_nrows

    # Note: this Assert has no consumers, so it is never actually executed;
    # wiring it in via tf.control_dependencies would make it effective.
    tf.Assert(
      tf.equal(state_idx, tf.shape(state)[1]),
      [tf.shape(state)],
    )

    return M0, write_w0s, read_w0s
Project: Densenet    Author: bysowhat    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: Densenet    Author: bysowhat    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: resnet    Author: renmengye    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.
  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.
  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.
  Returns:
    the cropped (and resized) image.
  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  cropped_shape = control_flow_ops.with_dependencies(
      [rank_assertion],
      tf.pack([crop_height, crop_width, original_shape[2]]))

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  image = control_flow_ops.with_dependencies(
      [size_assertion],
      tf.slice(image, offsets, cropped_shape))
  return tf.reshape(image, cropped_shape)
Project: tpu-demos    Author: tensorflow    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: FastMaskRCNN    Author: CharlesShang    | Project source | File source
def crop(images, boxes, batch_inds, stride = 1, pooled_height = 7, pooled_width = 7, scope='ROIAlign'):
  """Cropping areas of features into fixed size
  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds: 

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    #
    boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2]) # to (x, y)
    xs = boxes[:, 0] 
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
        return  tf.image.crop_and_resize(images, boxes, batch_inds,
                                         [pooled_height, pooled_width],
                                         method='bilinear',
                                         name='Crop')
Project: FastMaskRCNN    Author: CharlesShang    | Project source | File source
def crop_(images, boxes, batch_inds, ih, iw, stride = 1, pooled_height = 7, pooled_width = 7, scope='ROIAlign'):
  """Cropping areas of features into fixed size
  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds: 

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    #
    boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2]) # to (x, y)
    xs = boxes[:, 0] 
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
        return  [tf.image.crop_and_resize(images, boxes, batch_inds,
                                         [pooled_height, pooled_width],
                                         method='bilinear',
                                         name='Crop')] + [boxes]
Project: tensor2tensor    Author: tensorflow    | Project source | File source
def _get_cubic_root(self):
    """Get the cubic root."""
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    assert_array = [
        tf.Assert(
            tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._grad_var)),
            [self._grad_var,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._grad_var)),
            [self._grad_var,])
    ]
    with tf.control_dependencies(assert_array):
      p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
      w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
      w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
      y = w - p / 3.0 / w
      x = y + 1
    return x
Project: tensorflow-talk-debugging    Author: wookayin    | Project source | File source
def multilayer_perceptron(x):
    fc1 = layers.fully_connected(x, 256, activation_fn=tf.nn.relu, scope='fc1')
    fc2 = layers.fully_connected(fc1, 256, activation_fn=tf.nn.relu, scope='fc2')
    out = layers.fully_connected(fc2, 10, activation_fn=None, scope='out')
    tf.add_to_collection('Asserts', tf.Assert(tf.reduce_all(out > 0), [out], name='assert_out_positive'))     # (*)
    return out

# build model, loss, and train op
Project: tensorflow-talk-debugging    Author: wookayin    | Project source | File source
def multilayer_perceptron(x):
    fc1 = layers.fully_connected(x, 256, activation_fn=tf.nn.relu, scope='fc1')
    fc2 = layers.fully_connected(fc1, 256, activation_fn=tf.nn.relu, scope='fc2')
    out = layers.fully_connected(fc2, 10, activation_fn=None, scope='out')
    # Note: this Assert op dangles (nothing depends on it, so it never runs),
    # and the condition should be a scalar, e.g. tf.reduce_all(out > 0).
    tf.Assert(out > 0, [out], name='assert_out_positive')
    return out

# build model, loss, and train op
Project: tensorflow-talk-debugging    Author: wookayin    | Project source | File source
def multilayer_perceptron(x):
    fc1 = layers.fully_connected(x, 256, activation_fn=tf.nn.relu, scope='fc1')
    fc2 = layers.fully_connected(fc1, 256, activation_fn=tf.nn.relu, scope='fc2')
    out = layers.fully_connected(fc2, 10, activation_fn=None, scope='out')
    assert_op = tf.Assert(tf.reduce_all(out > 0), [out], name='assert_out_positive')
    #out = tf.with_dependencies([assert_op], out)
    with tf.control_dependencies([assert_op]):
        out = tf.identity(out, name='out')
    return out

# build model, loss, and train op
Project: ternarynet    Author: czhu95    | Project source | File source
def _mapper(self, grad, var):
        # this is very slow...
        #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
        grad = tf.check_numerics(grad, 'CheckGradient')
        return grad
Project: places365-tf    Author: baileyqbb    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: tensorflow-densenet    Author: pudae    | Project source | File source
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Project: tensorflow    Author: luyishisi    | Project source | File source
def to_normalized_coordinates(keypoints, height, width,
                              check_range=True, scope=None):
  """Converts absolute keypoint coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    keypoints = keypoint_ops.to_normalized_coordinates(keypoints,
                                                       tf.shape(images)[1],
                                                       tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates are
  already normalized). The value 1.01 is to deal with small rounding errors.

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2].
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with normalized
    coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, 1.0 / height, 1.0 / width)
Project: tensorflow    Author: luyishisi    | Project source | File source
def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
  """Converts normalized keypoint coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum keypoint
  coordinate value is larger than 1.01 (in which case coordinates are already
  absolute).

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2]
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
    in terms of the image size.

  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input keypoints is correct.
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)
Project: tensorflow    Author: luyishisi    | Project source | File source
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
  """Sort boxes and associated fields according to a scalar field.

  A common use case is reordering the boxes according to descending scores.

  Args:
    boxlist: BoxList holding N boxes.
    field: A BoxList field for sorting and reordering the BoxList.
    order: (Optional) descend or ascend. Default is descend.
    scope: name scope.

  Returns:
    sorted_boxlist: A sorted BoxList with the field in the specified order.

  Raises:
    ValueError: if specified field does not exist
    ValueError: if the order is not either descend or ascend
  """
  with tf.name_scope(scope, 'SortByField'):
    if order != SortOrder.descend and order != SortOrder.ascend:
      raise ValueError('Invalid sort order')

    field_to_sort = boxlist.get_field(field)
    if len(field_to_sort.shape.as_list()) != 1:
      raise ValueError('Field should have rank 1')

    num_boxes = boxlist.num_boxes()
    num_entries = tf.size(field_to_sort)
    length_assert = tf.Assert(
        tf.equal(num_boxes, num_entries),
        ['Incorrect field size: actual vs expected.', num_entries, num_boxes])

    with tf.control_dependencies([length_assert]):
      # TODO: Remove with tf.device when top_k operation runs correctly on GPU.
      with tf.device('/cpu:0'):
        _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)

    if order == SortOrder.ascend:
      sorted_indices = tf.reverse_v2(sorted_indices, [0])

    return gather(boxlist, sorted_indices)
Project: tensorflow    Author: luyishisi    | Project source | File source
def to_normalized_coordinates(boxlist, height, width,
                              check_range=True, scope=None):
  """Converts absolute box coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(images)[1],
                                                     tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates are
  already normalized). The value 1.01 is to deal with small rounding errors.

  Args:
    boxlist: BoxList with coordinates in terms of pixel-locations.
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with normalized coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, 1 / height, 1 / width)
Project: tensorflow    Author: luyishisi    | Project source | File source
def to_absolute_coordinates(boxlist, height, width,
                            check_range=True, scope=None):
  """Converts normalized box coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum box coordinate
  value is larger than 1.01 (in which case coordinates are already absolute).

  Args:
    boxlist: BoxList with coordinates in range [0, 1].
    height: Maximum value for height of absolute box coordinates.
    width: Maximum value for width of absolute box coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    boxlist with absolute coordinates in terms of the image size.

  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input boxes is correct.
    if check_range:
      box_maximum = tf.reduce_max(boxlist.get())
      max_assert = tf.Assert(tf.greater_equal(1.01, box_maximum),
                             ['maximum box coordinate value is larger '
                              'than 1.01: ', box_maximum])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(boxlist, height, width)
Project: tensorflow    Author: luyishisi    | Project source | File source
def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs.get_shape().assert_has_rank(4)
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                       tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
        ['image size must at least be 33 in both height and width.'])

    feature_map_layout = {
        'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                       '', ''],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
    }

    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              preprocessed_inputs,
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Project: TFMaskRCNN    Author: hillox    | Project source | File source
def crop(images, boxes, batch_inds, stride = 1, pooled_height = 7, pooled_width = 7, scope='ROIAlign'):
  """Cropping areas of features into fixed size
  Params:
  --------
  images: a 4-d Tensor of shape (N, H, W, C)
  boxes: rois in the original image, of shape (N, ..., 4), [x1, y1, x2, y2]
  batch_inds: 

  Returns:
  --------
  A Tensor of shape (N, pooled_height, pooled_width, C)
  """
  with tf.name_scope(scope):
    #
    boxes = boxes / (stride + 0.0)
    boxes = tf.reshape(boxes, [-1, 4])

    # normalize the boxes and swap x y dimensions
    shape = tf.shape(images)
    boxes = tf.reshape(boxes, [-1, 2]) # to (x, y)
    xs = boxes[:, 0] 
    ys = boxes[:, 1]
    xs = xs / tf.cast(shape[2], tf.float32)
    ys = ys / tf.cast(shape[1], tf.float32)
    boxes = tf.concat([ys[:, tf.newaxis], xs[:, tf.newaxis]], axis=1)
    boxes = tf.reshape(boxes, [-1, 4])  # to (y1, x1, y2, x2)

    # if batch_inds is False:
    #   num_boxes = tf.shape(boxes)[0]
    #   batch_inds = tf.zeros([num_boxes], dtype=tf.int32, name='batch_inds')
    # batch_inds = boxes[:, 0] * 0
    # batch_inds = tf.cast(batch_inds, tf.int32)

    # assert_op = tf.Assert(tf.greater(tf.shape(images)[0], tf.reduce_max(batch_inds)), [images, batch_inds])
    assert_op = tf.Assert(tf.greater(tf.size(images), 0), [images, batch_inds])
    with tf.control_dependencies([assert_op, images, batch_inds]):
        return  tf.image.crop_and_resize(images, boxes, batch_inds,
                                         [pooled_height, pooled_width],
                                         method='bilinear',
                                         name='Crop')