Python tensorflow module: less_equal() code examples

We extracted the following 38 code examples from open-source Python projects to illustrate how to use tensorflow.less_equal().
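
Before the project examples, here is a minimal self-contained sketch (TF 1.x graph style, matching the snippets below) of what tf.less_equal does: it compares two tensors element-wise with broadcasting and returns a boolean tensor, which is usually cast to a numeric type or used as a mask.

import tensorflow as tf

x = tf.constant([1, 2, 3, 4])
y = tf.constant(2)
mask = tf.less_equal(x, y)                       # element-wise x <= y, dtype bool
count = tf.reduce_sum(tf.cast(mask, tf.int32))   # number of elements with x <= y

with tf.Session() as sess:
    print(sess.run(mask))    # [ True  True False False]
    print(sess.run(count))   # 2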

Project: reslearn    Author: mackcmillion    | Project source | File source
def _resize_aux(image, new_shorter_edge_tensor):
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]

    height_smaller_than_width = tf.less_equal(height, width)
    new_height_and_width = tf.cond(
            height_smaller_than_width,
            lambda: (new_shorter_edge_tensor, _compute_longer_edge(height, width, new_shorter_edge_tensor)),
            lambda: (_compute_longer_edge(width, height, new_shorter_edge_tensor), new_shorter_edge_tensor)
    )

    # workaround since tf.image.resize_images() does not work
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, tf.pack(new_height_and_width))
    return tf.squeeze(image, [0])
Project: sequencing    Author: SwordYork    | Project source | File source
def next_inputs(self, time, sample_ids=None, prev_finished=None):
        if sample_ids is None or self.teacher_rate > 0.:
            finished = tf.greater_equal(time + 1, self.sequence_length)
        else:
            finished = math_ops.logical_or(
                tf.greater_equal(time + 1, self.max_step),
                tf.equal(self.eos_id, sample_ids))

        if self.teacher_rate == 1. or (sample_ids is None):
            next_input_ids = self._input_tas.read(time)
            return finished, self.lookup(next_input_ids)

        if self.teacher_rate > 0.:
            # scheduled
            teacher_rates = tf.less_equal(
                tf.random_uniform(tf.shape(sample_ids), minval=0., maxval=1.),
                self.teacher_rate)
            teacher_rates = tf.to_int32(teacher_rates)

            next_input_ids = (teacher_rates * self._input_tas.read(time)
                              + (1 - teacher_rates) * sample_ids)
        else:
            next_input_ids = sample_ids

        return finished, self.lookup(next_input_ids)
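
The scheduled-sampling coin flip above can be isolated into a short sketch (made-up shape and teacher_rate; not the project's code): uniform noise compared against the teacher rate with tf.less_equal gives a per-position 0/1 mask that mixes ground-truth ids with sampled ids.

teacher_rate = 0.7
coin = tf.less_equal(tf.random_uniform([4], minval=0., maxval=1.), teacher_rate)
use_teacher = tf.to_int32(coin)   # 1 with probability ~teacher_rate, else 0
# next_input_ids = use_teacher * ground_truth_ids + (1 - use_teacher) * sample_ids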
Project: attention-ocr    Author: emedvedev    | Project source | File source
def _prepare_image(self, image):
        """Resize the image to a maximum height of `self.height` and maximum
        width of `self.width` while maintaining the aspect ratio. Pad the
        resized image to a fixed size of ``[self.height, self.width]``."""
        img = tf.image.decode_png(image, channels=self.channels)
        dims = tf.shape(img)
        self.width = self.max_width

        max_width = tf.to_int32(tf.ceil(tf.truediv(dims[1], dims[0]) * self.height_float))
        max_height = tf.to_int32(tf.ceil(tf.truediv(self.width, max_width) * self.height_float))

        resized = tf.cond(
            tf.greater_equal(self.width, max_width),
            lambda: tf.cond(
                tf.less_equal(dims[0], self.height),
                lambda: tf.to_float(img),
                lambda: tf.image.resize_images(img, [self.height, max_width],
                                               method=tf.image.ResizeMethod.BICUBIC),
            ),
            lambda: tf.image.resize_images(img, [max_height, self.width],
                                           method=tf.image.ResizeMethod.BICUBIC)
        )

        padded = tf.image.pad_to_bounding_box(resized, 0, 0, self.height, self.width)
        return padded
Project: pytruenorth    Author: vmonaco    | Project source | File source
def normal_ccdf(x, mu, sigma2):
    """Normal CCDF"""
    # Check for degenerate distributions when sigma2 == 0
    # if x >= mu, n = 0
    # if x < mu, n = 1
    # sigma2_le_0 = tf.less_equal(sigma2, 0.)
    # x_gte_mu = tf.greater_equal(x, mu)
    # x_lt_mu = tf.less(x, mu)

    # Never divide by zero, instead the logic below handles degenerate distribution cases
    # sigma2 = tf.cond(sigma2_le_0, lambda: tf.ones_like(sigma2), lambda: sigma2)

    p = (1. - 0.5 * (1. + tf.erf((x - mu) / tf.sqrt(2. * sigma2))))
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_gte_mu), lambda: tf.zeros_like(p), lambda: p)
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_lt_mu), lambda: tf.ones_like(p), lambda: p)
    return p
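
A quick pure-Python check of the closed form above (made-up values, standard library only): the CCDF of a Normal(mu, sigma2) is 1 - Phi((x - mu) / sigma), which is exactly what the tf.erf expression computes.

import math

x, mu, sigma2 = 0.3, 0.0, 1.0
p = 1.0 - 0.5 * (1.0 + math.erf((x - mu) / math.sqrt(2.0 * sigma2)))
print(p)   # ~0.3821, i.e. P(X > 0.3) for a standard normal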
Project: tensorflow    Author: luyishisi    | Project source | File source
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_)
Project: tensorrec    Author: jfkirk    | Project source | File source
def separation_loss(tf_prediction_serial, tf_interactions_serial, **kwargs):
    """
    This loss function models the explicit positive and negative interaction predictions as normal distributions and
    returns the probability of overlap between the two distributions.
    :param tf_prediction_serial:
    :param tf_interactions_serial:
    :return:
    """

    tf_positive_mask = tf.greater(tf_interactions_serial, 0.0)
    tf_negative_mask = tf.less_equal(tf_interactions_serial, 0.0)

    tf_positive_predictions = tf.boolean_mask(tf_prediction_serial, tf_positive_mask)
    tf_negative_predictions = tf.boolean_mask(tf_prediction_serial, tf_negative_mask)

    tf_pos_mean, tf_pos_var = tf.nn.moments(tf_positive_predictions, axes=[0])
    tf_neg_mean, tf_neg_var = tf.nn.moments(tf_negative_predictions, axes=[0])

    tf_overlap_distribution = tf.contrib.distributions.Normal(loc=(tf_neg_mean - tf_pos_mean),
                                                              scale=tf.sqrt(tf_neg_var + tf_pos_var))

    loss = 1.0 - tf_overlap_distribution.cdf(0.0)
    return loss
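
A hedged usage sketch (made-up serialized tensors, TF 1.x session): interactions greater than zero are treated as positives, the rest as negatives, and the returned loss approximates the probability that a random negative prediction exceeds a random positive one.

predictions = tf.constant([0.9, 0.8, 0.1, -0.2])
interactions = tf.constant([1.0, 1.0, 0.0, -1.0])   # > 0 positive, <= 0 negative
loss = separation_loss(predictions, interactions)

with tf.Session() as sess:
    print(sess.run(loss))   # small value, since positives score well above negatives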
Project: cnn_lstm_ctc_ocr    Author: weinman    | Project source | File source
def _get_input_filter(width, width_threshold, length, length_threshold):
    """Boolean op for discarding input data based on string or image size
    Input:
      width            : Tensor representing the image width
      width_threshold  : Python numerical value (or None) representing the 
                         maximum allowable input image width 
      length           : Tensor representing the ground truth string length
      length_threshold : Python numerical value (or None) representing the 
                         maximum allowable input string length
    Returns:
      keep_input : Boolean Tensor indicating whether to keep a given input
                   with the specified image width and string length
    """

    keep_input = None

    if width_threshold is not None:
        keep_input = tf.less_equal(width, width_threshold)

    if length_threshold is not None:
        length_filter = tf.less_equal(length, length_threshold)
        if keep_input is None:
            keep_input = length_filter
        else:
            keep_input = tf.logical_and(keep_input, length_filter)

    if keep_input is None:
        keep_input = True
    else:
        keep_input = tf.reshape(keep_input, [])  # explicitly make a scalar

    return keep_input
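
A small usage sketch (made-up sizes and thresholds): the returned scalar boolean says whether both the image width and the label length fall under their thresholds, and can be fed to a keep/drop filter in the input pipeline.

width = tf.constant(180)
length = tf.constant(12)
keep_input = _get_input_filter(width, 200, length, 25)

with tf.Session() as sess:
    print(sess.run(keep_input))   # True: 180 <= 200 and 12 <= 25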
Project: cwt-tensorflow    Author: nickgeoca    | Project source | File source
def cwt(wav, widthCwt, wavelet):
    length = wav.shape[0]
    wav = tf.to_float(wav)
    wav = tf.reshape(wav, [1,length,1,1])

    # While loop functions
    def body(i, m): 
        v = conv1DWavelet(wav, i, wavelet)
        v = tf.reshape(v, [length, 1])

        m = tf.concat([m,v], 1)

        return [1 + i, m]

    def cond_(i, m):
        return tf.less_equal(i, widthCwt)

    # Initialize and run while loop
    emptyCwtMatrix = tf.zeros([length, 0], dtype='float32') 
    i = tf.constant(1)
    _, result = tf.while_loop(
            cond_,
            body,
            [i, emptyCwtMatrix],
            shape_invariants=[i.get_shape(), tf.TensorShape([length, None])],
            back_prop=False,
            parallel_iterations=1024,
            )
    result = tf.transpose(result)

    return result

# ------------------------------------------------------
#                 wavelets
Project: keras    Author: GeekLiB    | Project source | File source
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __le__(self, other):
        return tf.less_equal(self, other)
Project: zhusuan    Author: thu-ml    | Project source | File source
def _prob(self, given):
        mask = tf.cast(tf.logical_and(tf.less_equal(self.minval, given),
                                      tf.less(given, self.maxval)),
                       self.dtype)
        p = 1. / (self.maxval - self.minval)
        if self._check_numerics:
            p = tf.check_numerics(p, "p")
        return p * mask
Project: densecap-tensorflow    Author: rampage644    | Project source | File source
def huber_loss(x, delta=1):
    coef = 0.5
    l2_mask = tf.less_equal(tf.abs(x), delta)
    l1_mask = tf.greater(tf.abs(x), delta)

    term_1 = tf.reduce_sum(coef * tf.square(tf.boolean_mask(x, l2_mask)))
    term_2 = tf.reduce_sum(delta * (tf.abs(tf.boolean_mask(x, l1_mask)) - coef * delta))

    return term_1 + term_2
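
A quick numerical check (made-up values, default delta=1): elements with |x| <= delta contribute the quadratic term 0.5*x^2, the rest the linear term delta*(|x| - 0.5*delta).

x = tf.constant([0.5, -2.0])
loss = huber_loss(x)   # 0.5 * 0.5**2 + (2.0 - 0.5) = 1.625

with tf.Session() as sess:
    print(sess.run(loss))   # 1.625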
Project: antgo    Author: jianzfb    | Project source | File source
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
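
The per-point lookup inside the loop above can be sketched in isolation (made-up precision/recall curves; this skips the project-specific tfe_math.cummax smoothing): for a recall threshold x, take the minimum precision over all points whose recall is <= x.

precision = tf.constant([1.0, 0.9, 0.8, 0.6])
recall = tf.constant([0.1, 0.3, 0.6, 1.0])
mask = tf.less_equal(recall, 0.5)                        # [True, True, False, False]
value = tf.reduce_min(tf.boolean_mask(precision, mask))  # 0.9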
Project: ssd_tensorflow    Author: railsnoob    | Project source | File source
def _smooth_l1(self,x):
        return tf.where( tf.less_equal(tf.abs(x),1.0), 0.5*x*x,  tf.abs(x) - 0.5)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def lesser_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def certainty(self):
        certainty = self.seg_prediction * tf.log(self.seg_prediction)
        certainty = -tf.reduce_sum(certainty,reduction_indices=2)
        s1 = tf.ones(tf.shape(certainty))
        csum = tf.cumsum(s1,axis=1)
        mask = tf.less_equal(csum,tf.cast(tf.tile(tf.expand_dims(self._length,1),[1,tf.shape(certainty)[1]]),tf.float32))
        mask = tf.select(mask, tf.ones(tf.shape(certainty)),
                  tf.zeros(tf.shape(certainty)))
        certainty *= mask
        certainty = tf.reduce_sum(certainty, reduction_indices=1)
        return certainty
Project: SSD_tensorflow_VOC    Author: LevinJ    | Project source | File source
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
Project: tefla    Author: openAGI    | Project source | File source
def _example_too_big(self, example, max_length):
        return tf.less_equal(self._example_length(example), max_length)
Project: lsdc    Author: febert    | Project source | File source
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
Project: keras-customized    Author: ambrite    | Project source | File source
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
Project: Deep-Fashion    Author: TomPyonsuke    | Project source | File source
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
Project: ethnicity-tensorflow    Author: jhyuklee    | Project source | File source
def mask_by_index(batch_size, input_len, max_time_step):
    with tf.variable_scope('Masking') as scope:
        input_index = tf.range(0, batch_size) * max_time_step + (input_len - 1)
        lengths_transposed = tf.expand_dims(input_index, 1)
        lengths_tiled = tf.tile(lengths_transposed, [1, max_time_step])
        mask_range = tf.range(0, max_time_step)
        range_row = tf.expand_dims(mask_range, 0)
        range_tiled = tf.tile(range_row, [batch_size, 1])
        mask = tf.less_equal(range_tiled, lengths_tiled)
        weight = tf.select(mask, tf.ones([batch_size, max_time_step]),
                           tf.zeros([batch_size, max_time_step]))
        weight = tf.reshape(weight, [-1])
        return weight
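
Since the mask above keeps positions whose index is at most length - 1, an equivalent float weight can also be built with tf.sequence_mask (a sketch with made-up lengths, not the project's code):

lengths = tf.constant([2, 4])
mask = tf.sequence_mask(lengths, maxlen=5)   # shape [2, 5], True where index < length
weights = tf.cast(mask, tf.float32)
flat_weights = tf.reshape(weights, [-1])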
Project: keras    Author: NVIDIA    | Project source | File source
def lesser_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
Project: keras_superpixel_pooling    Author: parag2489    | Project source | File source
def less_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Arguments
        x: Tensor or variable.
        y: Tensor or variable.

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
Project: DeepDeepParser    Author: janmbuys    | Project source | File source
def mask_decoder_only_shift(logit, thin_stack_head_next, transition_state_map,
                          logit_size, batch_size):
  """Ensures that if the stack is empty, has to GEN_STATE (shift transition)

  For each batch entry k:
    If thin_stack_head_next == 0, #alternatively, or 1.
      let logit[k][reduce_index] = -np.inf, 
    else don't change.
  """
  stack_is_empty_bool = tf.less_equal(thin_stack_head_next, 1) 
  stack_is_empty = tf.select(stack_is_empty_bool, 
                            tf.ones(tf.pack([batch_size]), dtype=tf.int32),
                            tf.zeros(tf.pack([batch_size]), dtype=tf.int32))
  stack_is_empty = tf.reshape(stack_is_empty, [-1, 1])

  # Sh and Re states are disallowed (but not root).
  state_is_disallowed_updates = tf.sparse_to_dense(
      tf.pack([data_utils.RE_STATE, data_utils.ARC_STATE]),
      tf.pack([data_utils.NUM_TR_STATES]), 1)
  logit_states = tf.gather(transition_state_map, tf.range(logit_size))
  state_is_disallowed = tf.gather(state_is_disallowed_updates, logit_states)
  state_is_disallowed = tf.reshape(state_is_disallowed, [1, -1])

  index_delta = tf.matmul(stack_is_empty, state_is_disallowed) # 1 if disallowed
  values = tf.pack([0, -np.inf])
  delta = tf.gather(values, index_delta)
  new_logit = logit + delta
  return new_logit
Project: InnerOuterRNN    Author: Chemoinformatics    | Project source | File source
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
Project: DAVIS-2016-Chanllege-Solution    Author: tangyuhao    | Project source | File source
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      xvals: Python list of floats.
      precision: 1D Tensor, decreasing.
      recall: 1D Tensor, increasing.
    Returns:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
Project: tensorflow    Author: luyishisi    | Project source | File source
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
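
The "completely outside" test alone, with plain tensors instead of a BoxList (made-up boxes and a unit window): a box is pruned only when it lies entirely beyond some edge of the window.

boxes = tf.constant([[0.1, 0.1, 0.4, 0.4],    # overlaps the window
                     [1.2, 1.2, 1.5, 1.5]])   # completely outside
win_y_min, win_x_min, win_y_max, win_x_max = 0.0, 0.0, 1.0, 1.0
y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)
violations = tf.concat([
    tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
    tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)], 1)
keep = tf.logical_not(tf.reduce_any(violations, 1))   # [True, False]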
Project: odin_old    Author: trungnt13    | Project source | File source
def le(a, b):
    """a <= b"""
    return tf.less_equal(a, b)
Project: tensorrec    Author: jfkirk    | Project source | File source
def warp_loss(tf_prediction, tf_y, **kwargs):
    # TODO JK: implement WARP loss

    tf_positive_mask = tf.greater(tf_y, 0.0)
    tf_negative_mask = tf.less_equal(tf_y, 0.0)

    tf_positive_predictions = tf.boolean_mask(tf_prediction, tf_positive_mask) # noqa
    tf_negative_predictions = tf.boolean_mask(tf_prediction, tf_negative_mask) # noqa
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | Project source | File source
def _bilinear_interpolate(self,im, im_org, x, y):
        with tf.variable_scope('_interpolate'):
            # constants
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(self.height, 'float32')
            width_f = tf.cast(self.width, 'float32')
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
            # scale indices from [-1, 1] to [0, width/height]
            x = (x + 1.0)*(width_f) / 2.0
            y = (y + 1.0)*(height_f) / 2.0
            # do sampling
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = self.width
            dim1 = self.width*self.height
            base = self._repeat(tf.range(self.num_batch)*dim1, self.out_height*self.out_width, 'int32')
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = tf.expand_dims(base_y0 + x0, 1)
            idx_b = tf.expand_dims(base_y1 + x0, 1)
            idx_c = tf.expand_dims(base_y0 + x1, 1)
            idx_d = tf.expand_dims(base_y1 + x1, 1)
            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, self.num_channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.scatter_nd(idx_a, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ib = tf.scatter_nd(idx_b, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ic = tf.scatter_nd(idx_c, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Id = tf.scatter_nd(idx_d, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])

            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.scatter_nd(idx_a, tf.expand_dims(((x1_f-x) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wb = tf.scatter_nd(idx_b, tf.expand_dims(((x1_f-x) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wc = tf.scatter_nd(idx_c, tf.expand_dims(((x-x0_f) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wd = tf.scatter_nd(idx_d, tf.expand_dims(((x-x0_f) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])

            value_all = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            weight_all = tf.clip_by_value(tf.add_n([wa, wb, wc, wd]),1e-5,1e+10)
            flag = tf.less_equal(weight_all, 1e-5* tf.ones_like(weight_all))
            flag = tf.cast(flag, tf.float32)
            im_org = tf.reshape(im_org, [-1,self.num_channels])
            output = tf.add(tf.div(value_all, weight_all), tf.multiply(im_org, flag))
            return output
Project: tf.rasterizer    Author: vahidk    | Project source | File source
def draw_fn(self, shader):
        indices = tf.placeholder(tf.int32, [None, 3], name="ph_indices")
        verts = [None, None, None]

        for i in range(3):
            verts[i] = shader.vertex(indices[:, i], i)
            verts[i] = tf.matmul(verts[i], self.viewport, transpose_b=True)
            verts[i] = utils.affine_to_cartesian(verts[i])

        bbmin, bbmax = bounds(verts, self.width, self.height)

        def _fn(i):
            bbmin_i = tf.gather(bbmin, i)
            bbmax_i = tf.gather(bbmax, i)
            verts_i = [tf.gather(verts[0], i),
                       tf.gather(verts[1], i),
                       tf.gather(verts[2], i)]

            x, y = tf.meshgrid(tf.range(bbmin_i[0], bbmax_i[0]),
                               tf.range(bbmin_i[1], bbmax_i[1]))

            num_frags = tf.reduce_prod(tf.shape(x))
            p = tf.stack([tf.reshape(x, [-1]),
                          tf.reshape(y, [-1]),
                          tf.zeros([num_frags], dtype=tf.float32)], axis=1)

            bc, valid = barycentric(verts_i, p)

            p = tf.boolean_mask(p, valid)
            bc = [tf.boolean_mask(bc[k], valid) for k in range(3)]
            z = utils.tri_dot([verts_i[k][2] for k in range(3)], bc)

            inds = tf.to_int32(tf.stack([p[:, 1], p[:, 0]], axis=1))
            cur_z = tf.gather_nd(self.depth, inds)
            visible = tf.less_equal(cur_z, z)

            inds = tf.boolean_mask(inds, visible)
            bc = [tf.boolean_mask(bc[k], visible) for k in range(3)]
            z = tf.boolean_mask(z, visible)

            c = utils.pack_colors(shader.fragment(bc, i), 1)

            updates = [
                tf.scatter_nd_update(self.color, inds, c, use_locking=False),
                tf.scatter_nd_update(self.depth, inds, z, use_locking=False)]
            return updates

        num_faces = tf.shape(indices)[0]
        updates = utils.sequential_for(_fn, 0, num_faces)
        self.commands.append(updates)

        def _draw(indices_val, **kwargs):
            self.args[indices] = indices_val
            for k, v in kwargs.items():
                self.args[getattr(shader, k)] = v

        return _draw
Project: LiTeFlow    Author: petrux    | Project source | File source
def trim(tensor, width):
    """Trim the tensor on the -1 axis.

    Trim a given tensor of shape `[..., in_width]` to a smaller tensor
    of shape `[..., width]`, along the -1 axis. If the `width` argument
    is greater or equal than the actual width of the tensor, no operation
    is performed.

    Arguments:
      tensor: a 3D tf.Tensor of shape `[..., in_width]`.
      width: an `int` representing the target value of the 3rd
        dimension of the output tensor.

    Returns:
      a 3D tensor of shape `[..., width]` where the
        third dimension is the minimum between the input width
        and the value of the `width` argument.

    Example:
    ```python
    # t is a tensor like:
    # [[[1, 1, 1],
    #   [2, 2, 2],
    #   [3, 3, 3]],
    #  [[7, 7, 7],
    #   [8, 8, 8],
    #   [9, 9, 9]]]

    q = trim(t, 2)

    # q is a tensor like:
    # [[[1, 1],
    #   [2, 2],
    #   [3, 3]],
    #  [[7, 7],
    #   [8, 8],
    #   [9, 9]]]
    ```
    """
    result = tf.cond(
        tf.less_equal(tf.shape(tensor)[-1], width),
        lambda: tensor,
        lambda: _trim(tensor, width))
    result.set_shape(tensor.get_shape().as_list()[:-1] + [width])
    return result

Project: TF-deeplab    Author: chenxi116    | Project source | File source
def _build_train_op(self):
    """Build training specific ops for the graph."""
    labels_coarse = tf.image.resize_nearest_neighbor(self.labels, 
      [tf.shape(self.pred)[1], tf.shape(self.pred)[2]])
    labels_coarse = tf.squeeze(labels_coarse, squeeze_dims=[3])
    self.labels_coarse = tf.to_int32(labels_coarse)

    # ignore illegal labels
    raw_pred = tf.reshape(self.logits, [-1, self.num_classes])
    raw_gt = tf.reshape(self.labels_coarse, [-1,])
    indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, self.num_classes - 1)), 1)
    remain_pred = tf.gather(raw_pred, indices)
    remain_gt = tf.gather(raw_gt, indices)

    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=remain_pred, 
      labels=remain_gt)
    self.cls_loss = tf.reduce_mean(xent, name='xent')
    self.cost = self.cls_loss + self._decay()
    # tf.summary.scalar('cost', self.cost)

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self.learning_rate = tf.train.polynomial_decay(self.lrn_rate, 
      self.global_step, self.lr_decay_step, power=0.9)
    # tf.summary.scalar('learning rate', self.learning_rate)

    tvars = tf.trainable_variables()

    if self.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
    elif self.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.learning_rate, 0.9)
    else:
      raise NameError("Unknown optimizer type %s!" % self.optimizer)

    grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)
    var_lr_mult = {}
    for var in tvars:
      if var.op.name.find(r'fc1_voc12') > 0 and var.op.name.find(r'biases') > 0:
        var_lr_mult[var] = 20.
      elif var.op.name.find(r'fc1_voc12') > 0:
        var_lr_mult[var] = 10.
      else:
        var_lr_mult[var] = 1.
    grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v) 
        for g, v in grads_and_vars]

    apply_op = optimizer.apply_gradients(grads_and_vars,
        global_step=self.global_step, name='train_step')

    train_ops = [apply_op] + self._extra_train_ops
    self.train_step = tf.group(*train_ops)

  # TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
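
The "ignore illegal labels" step used above, in isolation (made-up label ids, with 255 standing in for an ignore label): positions whose label exceeds num_classes - 1 are dropped before the cross-entropy is computed.

raw_gt = tf.constant([0, 3, 255, 1])
num_classes = 21
valid = tf.squeeze(tf.where(tf.less_equal(raw_gt, num_classes - 1)), 1)
kept = tf.gather(raw_gt, valid)   # [0, 3, 1]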
Project: tefla    Author: openAGI    | Project source | File source
def bucket_by_sequence_length(self, dataset, example_length_fn, bucket_boundaries,
                                  bucket_batch_sizes, window_size):
        """Bucket entries in dataset by length.

        Args:
          dataset: Dataset of dict<feature name, Tensor>.
          example_length_fn: function from example to int, determines the length of
            the example, which will determine the bucket it goes into.
          bucket_boundaries: list<int>, boundaries of the buckets.
          bucket_batch_sizes: list<int>, batch size per bucket.
          window_size: an integer divisible by all elements of bucket_batch_sizes

        Returns:
          Dataset of padded and batched examples.
        """
        with tf.name_scope("bucket_by_seq_length"):

            def example_to_bucket_id(example):
                """Return int64 id of the length bucket for this example."""
                seq_length = example_length_fn(example)

                boundaries = list(bucket_boundaries)
                buckets_min = [np.iinfo(np.int32).min] + boundaries
                buckets_max = boundaries + [np.iinfo(np.int32).max]
                conditions_c = tf.logical_and(
                    tf.less_equal(buckets_min, seq_length),
                    tf.less(seq_length, buckets_max))
                bucket_id = tf.reduce_min(tf.where(conditions_c))

                return bucket_id

            def batching_fn(bucket_id, grouped_dataset):
                batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
                batch_size = batch_sizes[bucket_id]

                # Pad each dimension of each feature so that they match.
                padded_shapes = dict(
                    [(name, [None] * len(shape))
                     for name, shape in grouped_dataset.output_shapes.items()])
                return grouped_dataset.padded_batch(batch_size, padded_shapes)

            dataset = dataset.group_by_window(example_to_bucket_id, batching_fn,
                                              window_size)
            return dataset
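
A worked sketch of the bucket lookup alone (made-up boundaries, with simplified lower/upper bounds instead of int32 min/max): a sequence of length 17 with boundaries [8, 16, 32] lands in bucket 2 because 16 <= 17 < 32.

seq_length = tf.constant(17)
boundaries = [8, 16, 32]
buckets_min = [0] + boundaries               # lower bound of each bucket
buckets_max = boundaries + [2 ** 31 - 1]     # upper bound of each bucket
conditions = tf.logical_and(tf.less_equal(buckets_min, seq_length),
                            tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions))

with tf.Session() as sess:
    print(sess.run(bucket_id))   # 2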
Project: jon-siamese    Author: maigimenez    | Project source | File source
def __init__(self, sequence_length, vocab_size, embedding_size,
                 filter_sizes, num_filters, margin):
        with tf.name_scope("embeddings") as embeddings_scope:
            self.filter_sizes = filter_sizes
            self.embedding_size = embedding_size
            self.num_filters = num_filters
            self.W_embedding = tf.Variable(tf.random_uniform([vocab_size, self.embedding_size], -1.0, 1.0),
                                           trainable=True, name="W_embedding")
            self.is_training = tf.placeholder(tf.bool, [], name='is_training')

        with tf.variable_scope("siamese") as siam_scope:
            # 1ST LAYER: Embedding layer
            with tf.variable_scope("embeddings-siamese") as input_scope:
                self.left_input = tf.placeholder(tf.int32, [None, sequence_length], name='left')
                left_embedded_words = tf.nn.embedding_lookup(self.W_embedding, self.left_input)
                self.left_embedded = tf.expand_dims(left_embedded_words, -1, name='left_embeddings')
                print('  ---> EMBEDDING LEFT: ', self.left_embedded)

                self.right_input = tf.placeholder(tf.int32, [None, sequence_length], name='right')
                right_embedded_words = tf.nn.embedding_lookup(self.W_embedding, self.right_input)
                self.right_embedded = tf.expand_dims(right_embedded_words, -1, name='right_embeddings')
                print('  ---> EMBEDDING RIGHT: ', self.right_embedded)

            self.left_siamese = self.subnet(self.left_embedded, 'left', False)
            print("---> SIAMESE TENSOR: ", self.left_siamese)
            siam_scope.reuse_variables()
            self.right_siamese = self.subnet(self.right_embedded, 'right', True)
            print("---> SIAMESE TENSOR: ", self.right_siamese)

        with tf.name_scope("similarity"):
            print('\n ----------------------- JOIN SIAMESE ----------------------------')
            self.labels = tf.placeholder(tf.int32, [None, 1], name='labels')
            self.labels = tf.to_float(self.labels)
            print('---> LABELS: ', self.labels)

            with tf.variable_scope("loss"):
                self.margin = tf.get_variable('margin', dtype=tf.float32,
                                              initializer=tf.constant(margin, shape=[1]),
                                              trainable=False)
                self.loss, self.attr, \
                self.rep, self.distance, self.maxpart = contrastive_loss(self.labels,
                                                           self.left_siamese,
                                                           self.right_siamese,
                                                           self.margin)

        with tf.name_scope("prediction"):
            # TODO This is a configuration parameter
            self.threshold = tf.get_variable('threshold', dtype=tf.float32,
                                             initializer=tf.constant(1.0, shape=[1]))
            self.predictions = tf.less_equal(self.distance, self.threshold)
            self.predictions = tf.cast(self.predictions, 'float32')
            self.correct_predictions = tf.equal(self.predictions, self.labels)
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_predictions, tf.float32))
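
The prediction rule at the end, in isolation (made-up distances and threshold): pairs whose embedding distance is at most the threshold are predicted as similar (1.0), the rest as dissimilar (0.0).

distance = tf.constant([0.3, 1.7, 0.9])
threshold = 1.0
predictions = tf.cast(tf.less_equal(distance, threshold), tf.float32)   # [1., 0., 1.]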
Project: tensor2tensor    Author: tensorflow    | Project source | File source
def bucket_by_sequence_length(dataset,
                              example_length_fn,
                              bucket_boundaries,
                              bucket_batch_sizes,
                              padded_shapes=None):
  """Bucket entries in dataset by length.

  Args:
    dataset: Dataset of dict<feature name, Tensor>.
    example_length_fn: function from example to int, determines the length of
      the example, which will determine the bucket it goes into.
    bucket_boundaries: list<int>, boundaries of the buckets.
    bucket_batch_sizes: list<int>, batch size per bucket.
    padded_shapes: dict<feature name, list<int>>, optional, shapes of the
      features with None where feature should be padded to max in that dim.

  Returns:
    Dataset of padded and batched examples.
  """
  with tf.name_scope("bucket_by_seq_length"):

    def example_to_bucket_id(example):
      """Return int64 id of the length bucket for this example."""
      seq_length = example_length_fn(example)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = tf.logical_and(
          tf.less_equal(buckets_min, seq_length),
          tf.less(seq_length, buckets_max))
      bucket_id = tf.reduce_min(tf.where(conditions_c))

      return bucket_id

    def window_size_fn(bucket_id):
      # window size = batch size
      batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
      window_size = batch_sizes[bucket_id]
      return window_size

    def batching_fn(bucket_id, grouped_dataset):
      batch_sizes = tf.constant(bucket_batch_sizes, dtype=tf.int64)
      batch_size = batch_sizes[bucket_id]
      return padded_batch(grouped_dataset, batch_size, padded_shapes)

    dataset = dataset.apply(
        tf.contrib.data.group_by_window(example_to_bucket_id, batching_fn, None,
                                        window_size_fn))
    return dataset
Project: tensorflow    Author: luyishisi    | Project source | File source
def _subsample_selection_to_desired_neg_pos_ratio(self,
                                                    indices,
                                                    match,
                                                    max_negatives_per_positive,
                                                    min_negatives_per_image=0):
    """Subsample a collection of selected indices to a desired neg:pos ratio.

    This function takes a subset of M indices (indexing into a large anchor
    collection of N anchors where M<N) which are labeled as positive/negative
    via a Match object (matched indices are positive, unmatched indices
    are negative).  It returns a subset of the provided indices retaining all
    positives as well as up to the first K negatives, where:
      K=floor(num_negative_per_positive * num_positives).

    For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors),
    with positives=[2, 5] and negatives=[4, 7, 9, 10] and
    num_negatives_per_positive=1, then the returned subset of indices
    is [2, 4, 5, 7].

    Args:
      indices: An integer tensor of shape [M] representing a collection
        of selected anchor indices
      match: A matcher.Match object encoding the match between anchors and
        groundtruth boxes for a given image, with rows of the Match objects
        corresponding to groundtruth boxes and columns corresponding to anchors.
      max_negatives_per_positive: (float) maximum number of negatives for
        each positive anchor.
      min_negatives_per_image: minimum number of negative anchors for a given
        image. Allow sampling negatives in image without any positive anchors.

    Returns:
      selected_indices: An integer tensor of shape [M'] representing a
        collection of selected anchor indices with M' <= M.
      num_positives: An integer tensor representing the number of positive
        examples in selected set of indices.
      num_negatives: An integer tensor representing the number of negative
        examples in selected set of indices.
    """
    positives_indicator = tf.gather(match.matched_column_indicator(), indices)
    negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices)
    num_positives = tf.reduce_sum(tf.to_int32(positives_indicator))
    max_negatives = tf.maximum(min_negatives_per_image,
                               tf.to_int32(max_negatives_per_positive *
                                           tf.to_float(num_positives)))
    topk_negatives_indicator = tf.less_equal(
        tf.cumsum(tf.to_int32(negatives_indicator)), max_negatives)
    subsampled_selection_indices = tf.where(
        tf.logical_or(positives_indicator, topk_negatives_indicator))
    num_negatives = tf.size(subsampled_selection_indices) - num_positives
    return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]),
            num_positives, num_negatives)
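
The cumulative-sum trick near the end can be sketched with made-up indicators: negatives are kept only while the running count of negatives is still at most max_negatives, which retains the first K negatives in index order while all positives pass through the logical_or.

negatives_indicator = tf.constant([0, 1, 0, 1, 1, 1])   # 1 marks a negative anchor
max_negatives = 2
topk_negatives = tf.less_equal(tf.cumsum(negatives_indicator), max_negatives)
# cumsum = [0, 1, 1, 2, 3, 4] -> [True, True, True, True, False, False]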