Python tensorflow module: scatter_nd() code examples

We extracted the following 35 code examples from open-source Python projects to show how to use tensorflow.scatter_nd().

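Before the per-project examples, here is a minimal standalone sketch of what tf.scatter_nd does (a hypothetical snippet, not taken from any of the projects below): it writes `updates` into a zero tensor of the given `shape` at the given `indices`.

import tensorflow as tf

indices = tf.constant([[0], [2]])        # write into rows 0 and 2
updates = tf.constant([[1, 2], [3, 4]])  # one row of updates per index
shape = tf.constant([4, 2])              # output is a 4 x 2 zero tensor
result = tf.scatter_nd(indices, updates, shape)
# result == [[1, 2], [0, 0], [3, 4], [0, 0]]
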
Project: attend_infer_repeat    Author: akosiorek
def masked_apply(tensor, op, mask):
    """Applies `op` to tensor only at locations indicated by `mask` and sets the rest to zero.

    Similar to doing `tensor = tf.where(mask, op(tensor), tf.zeros_like(tensor))`, but it behaves
    correctly when `op(tensor)` produces NaN or inf at masked-out locations, whereas tf.where does not.

    :param tensor: tf.Tensor
    :param op: tf.Op
    :param mask: tf.Tensor with dtype == bool
    :return: tf.Tensor
    """
    chosen = tf.boolean_mask(tensor, mask)
    applied = op(chosen)
    idx = tf.to_int32(tf.where(mask))
    result = tf.scatter_nd(idx, applied, tf.shape(tensor))
    return result
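
A hedged usage sketch (the tensors below are hypothetical): applying tf.log only where the input is positive, so the masked-out entries never produce NaN or inf.

x = tf.constant([[1.0, -2.0], [0.0, 4.0]])
safe_log = masked_apply(x, tf.log, x > 0)
# safe_log == [[0.0, 0.0], [0.0, log(4.0)]]; the plain tf.where version
# would still evaluate tf.log on the non-positive entries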
Project: aboleth    Author: data61
def _impute2D(self, X_2D):
        r"""Mean impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Sum the real values in each column
        col_tot = tf.reduce_sum(data_zeroed_missing_tf, 0)

        # Divide column totals by the number of non-nan values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)

        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_nan_means, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
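
The core trick shared by these imputation helpers (scattering per-column fill values into the positions of the missing entries) can be sketched standalone, with hypothetical names and a mask where 1 marks an observed value:

X = tf.constant([[1.0, 0.0], [3.0, 4.0]])              # 0.0 marks missing
real_val_mask = tf.constant([[1.0, 0.0], [1.0, 1.0]])
missing_ind = tf.where(tf.equal(real_val_mask, 0.0))   # [[0, 1]]
col_means = tf.reduce_sum(X * real_val_mask, 0) / tf.maximum(
    tf.reduce_sum(real_val_mask, 0), 1.0)              # [2.0, 4.0]
imputed_vals = tf.gather(col_means, missing_ind[:, 1])
filled = X * real_val_mask + tf.scatter_nd(
    missing_ind, imputed_vals, tf.shape(X, out_type=tf.int64))
# filled == [[1.0, 4.0], [3.0, 4.0]]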
Project: aboleth    Author: data61
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Draw a random sample from each column's distribution
        col_draws = [n.sample(seed=next(seedgen)) for n in self.normal_array]
        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
Project: Deep-Image-Matting    Author: Joker316701882
def unpool(pool, ind, ksize=[1, 2, 2, 1], scope='unpool'):

    with tf.variable_scope(scope):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
Project: bgsCNN    Author: SaoYan
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape =  tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret
Project: GPflow    Author: GPflow
def vec_to_tri(vectors, N):
    """
    Takes a D x M tensor `vectors` and maps it to a D x N x N tensor where
    the lower triangle of each N x N matrix is constructed by unpacking each
    M-vector.

    Native TensorFlow version of Custom Op by Mark van der Wilk.

    def int_shape(x):
        return list(map(int, x.get_shape()))

    D, M = int_shape(vectors)
    N = int( np.floor( 0.5 * np.sqrt( M * 8. + 1. ) - 0.5 ) )
    # Check M is a valid triangle number
    assert((N * (N + 1)) == (2 * M))
    """
    indices = list(zip(*np.tril_indices(N)))
    indices = tf.constant([list(i) for i in indices], dtype=tf.int64)

    def vec_to_tri_vector(vector):
        return tf.scatter_nd(indices=indices, shape=[N, N], updates=vector)

    return tf.map_fn(vec_to_tri_vector, vectors)
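
A hedged usage sketch (hypothetical values): packing one 3-vector into a 2 x 2 lower-triangular matrix, since M = 3 corresponds to N = 2.

vecs = tf.constant([[1.0, 2.0, 3.0]])  # D = 1, M = 3
tri = vec_to_tri(vecs, 2)
# tri[0] == [[1.0, 0.0],
#            [2.0, 3.0]]   (np.tril_indices fills the triangle row by row)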
Project: nengo_dl    Author: nengo
def build_step(self, signals):
        if self.len_match:
            super(SparseDotIncBuilder, self).build_step(signals)
            return

        A = signals.gather(self.A_data)
        X = signals.gather(self.X_data)

        assert A.get_shape()[0] == self.sparse_indices.get_shape()[0]

        # approach 1: using sparse_tensor_dense_matmul
        dot = gen_sparse_ops._sparse_tensor_dense_mat_mul(
            self.sparse_indices, A, self.A_shape, X)

        # approach 2: matmul(a_is_sparse)
        # sparse_A = tf.scatter_nd(self.sparse_indices, A, self.A_shape)
        # dot = tf.matmul(sparse_A, X, a_is_sparse=self.is_sparse)

        dot.set_shape(self.Y_data.shape + (signals.minibatch_size,))

        signals.scatter(self.Y_data, dot, mode=self.mode)
Project: tensor2tensor    Author: tensorflow
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dim is restored from the original reference tensor
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x
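
The class's `remove` (not shown here) gathers the non-padding rows; a standalone round-trip sketch of the same pattern follows (hypothetical tensors, not the class API):

x = tf.constant([[1.0], [0.0], [2.0], [0.0]])    # rows 1 and 3 are padding
nonpad_ids = tf.to_int32(tf.where(tf.not_equal(x[:, 0], 0.0)))
dim_origin = tf.shape(x)[:1]
compressed = tf.gather_nd(x, nonpad_ids)         # [[1.0], [2.0]]
restored = tf.scatter_nd(
    indices=nonpad_ids,
    updates=compressed,
    shape=tf.concat([dim_origin, tf.shape(compressed)[1:]], axis=0))
# restored == [[1.0], [0.0], [2.0], [0.0]]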
Project: Tensorflow_WhatWhereAutoencoder    Author: yselivonchyk
def unpool(net, mask, stride):
  assert mask is not None
  with tf.name_scope('UnPool2D'):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # calculate the new output shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # calculate indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
Project: TensorFlow_DCIGN    Author: yselivonchyk
def unpool(net, mask, stride=2):
  assert mask is not None
  with tf.name_scope('UnPool2D'):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # calculate the new output shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # calculate indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
Project: py-noisemaker    Author: aayars
def wormhole(tensor, shape, kink, input_stride, alpha=1.0):
    """
    Apply per-pixel field flow. Non-iterative.

    :param Tensor tensor:
    :param list[int] shape:
    :param float kink: Path twistiness
    :param float input_stride: Maximum pixel offset
    :param float alpha: Blend amount of the effect into the input tensor
    :return: Tensor
    """

    height, width, channels = shape

    values = value_map(tensor, shape)

    degrees = values * 360.0 * math.radians(1) * kink
    # stride = values * height * input_stride
    stride = height * input_stride

    x_index = tf.cast(row_index(shape), tf.float32)
    y_index = tf.cast(column_index(shape), tf.float32)

    x_offset = (tf.cos(degrees) + 1) * stride
    y_offset = (tf.sin(degrees) + 1) * stride

    x = tf.cast(x_index + x_offset, tf.int32) % width
    y = tf.cast(y_index + y_offset, tf.int32) % height

    luminosity = tf.square(tf.reshape(values, [height, width, 1]))

    out = normalize(tf.scatter_nd(offset_index(y, height, x, width), tensor * luminosity, tf.shape(tensor)))

    return blend(tensor, tf.sqrt(out), alpha)
Project: aboleth    Author: data61
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor.

        Parameters
        ----------
        X_2D : Tensor
            a rank 2 Tensor with missing data; the missing elements are
            filled per column from self.impute_scalars (a 1 x D Tensor)

        Returns
        -------
        X_imputed : Tensor
            a rank 2 Tensor with imputed data

        """
        # Fill zeros in for missing data initially
        data_zeroed_missing = X_2D * self.real_val_mask

        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(self.impute_scalars[0, :],
                                 self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing + missing_imputed

        return X_with_impute
Project: aboleth    Author: data61
def _impute2D(self, X_2D):
        r"""Impute a rank 2 tensor with draws from normal distributions.

        Parameters
        ----------
        X_2D : Tensor
            a rank 2 Tensor with missing data

        Returns
        -------
        X_imputed : Tensor
            a rank 2 Tensor with imputed data

        """
        # Fill zeros in for missing data initially
        data_zeroed_missing = X_2D * self.real_val_mask

        # Draw a random sample from each column's normal distribution
        col_draws = tf.transpose(self.normal.sample(seed=next(seedgen)))
        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])[:, 0]

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing + missing_imputed

        return X_with_impute
Project: antgo    Author: jianzfb
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output dimension
    on `axis` is the latter; the input is never truncated.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
Project: Super_TF    Author: Dhruv-Mohan
def Unpool_layer(self, pool, ind, k_size=[1, 2, 2, 1]):
        # https://github.com/tensorflow/tensorflow/issues/2169
        """
           Unpooling layer after max_pool_with_argmax.
           Args:
               pool:   max pooled output tensor
               ind:      argmax indices
               k_size:     k_size is the same as for the pool
           Return:
               unpool:    unpooling tensor
        """
        with tf.name_scope('Unpool'):
            input_shape =  tf.shape(pool)
            input_shape_aslist = pool.get_shape().as_list()
            output_shape = tf.stack([input_shape[0], input_shape[1] * k_size[1], input_shape[2] * k_size[2], input_shape[3]])
            output_shapeaslist = [-1,  input_shape_aslist[1]* k_size[1] ,  input_shape_aslist[2]  * k_size[2], input_shape_aslist[3]]

            pool_ = tf.reshape(pool, [-1])  # flatten across the batch as well
            batch_range = tf.reshape(tf.range(tf.cast(input_shape[0], tf.int64), dtype=ind.dtype), 
                                              shape=tf.stack([input_shape[0], 1, 1, 1]))
            b = tf.ones_like(ind) * batch_range
            b = tf.reshape(b, [-1, 1])
            ind_ = tf.reshape(ind, [-1, 1])
            ind_ = tf.concat([b, ind_], 1)
            ret = tf.scatter_nd(ind_, pool_, shape=tf.cast([input_shape[0], output_shapeaslist[1] * output_shapeaslist[2] * output_shapeaslist[3] ], tf.int64))
            ret = tf.reshape(ret, [-1, output_shapeaslist[1], output_shapeaslist[2], output_shapeaslist[3]])
            return ret
Project: SSD_tensorflow_VOC    Author: LevinJ
def test_scatter_nd():
    indices = tf.constant([[3]])
    updates = tf.constant([[9,10,11,12]])
    shape = tf.constant([8,4])
    scatter = tf.scatter_nd(indices, updates, shape)

    return scatter
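
For reference, evaluating `scatter` here yields an 8 x 4 zero matrix whose row 3 (0-indexed) is [9, 10, 11, 12].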
Project: SSD_tensorflow_VOC    Author: LevinJ
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output dimension
    on `axis` is the latter; the input is never truncated.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
Project: SSD_tensorflow_VOC    Author: LevinJ
def __match_no_miss(self,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores,jaccard,gt_labels,gt_bboxes, num_anchors):
        #make sure every ground truth box can be matched to at least one anchor box
        max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)
        def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
            r = tf.less(i, tf.shape(gt_labels)[0])
            return r
        def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

            # update gt_anchors_labels
            updates = tf.reshape(gt_labels[i], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])


            new_labels = tf.scatter_nd(indices, updates, shape)
            new_mask = tf.cast(new_labels, tf.bool)
            gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

            #update gt_anchors_bboxes
            updates = tf.reshape(gt_bboxes[i], [1,-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.shape(gt_anchors_bboxes)
            new_bboxes = tf.scatter_nd(indices, updates, shape)
            gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

            #update gt_anchors_scores
            updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])
            new_scores = tf.scatter_nd(indices, updates, shape)
            gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)



            return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


        i = 0
        [i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores] = tf.while_loop(cond, body,[i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores])

        return gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores
Project: tefla    Author: openAGI
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        with tf.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                           [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""
                where_sampling = tf.cast(
                    tf.where(sample_ids > -1), tf.int32)
                where_not_sampling = tf.cast(
                    tf.where(sample_ids <= -1), tf.int32)
                where_sampling_flat = tf.reshape(where_sampling, [-1])
                where_not_sampling_flat = tf.reshape(
                    where_not_sampling, [-1])
                sample_ids_sampling = tf.gather(
                    sample_ids, where_sampling_flat)
                inputs_not_sampling = tf.gather(
                    base_next_inputs, where_not_sampling_flat)
                sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
                base_shape = tf.shape(base_next_inputs)
                return (tf.scatter_nd(indices=where_sampling,
                                      updates=sampled_next_inputs,
                                      shape=base_shape)
                        + tf.scatter_nd(indices=where_not_sampling,
                                        updates=inputs_not_sampling,
                                        shape=base_shape))

            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
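
The merge at the end (gathering two disjoint subsets, then scattering both back into the original shape) can be sketched standalone with hypothetical tensors:

base = tf.constant([[1.0], [2.0], [3.0], [4.0]])
replacement = tf.constant([[20.0], [40.0]])
mask = tf.constant([False, True, False, True])
where_a = tf.cast(tf.where(mask), tf.int32)                  # [[1], [3]]
where_b = tf.cast(tf.where(tf.logical_not(mask)), tf.int32)  # [[0], [2]]
kept = tf.gather_nd(base, where_b)
merged = (tf.scatter_nd(where_a, replacement, tf.shape(base)) +
          tf.scatter_nd(where_b, kept, tf.shape(base)))
# merged == [[1.0], [20.0], [3.0], [40.0]]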
Project: nengo_dl    Author: nengo
def build_step(self, signals):
        input = signals.gather(self.input_data)
        input = tf.reshape(input, (self.n_ops, -1))

        state = signals.gather(self.state_sig)

        # compute output
        if self.C is None:
            output = tf.zeros_like(input)
        else:
            output = state * self.C
            output = tf.reshape(
                output,
                (self.n_ops, -1, signals.minibatch_size * self.signal_d))
            output = tf.reduce_sum(output, axis=1)

        if self.D is not None:
            output += self.D * input

        signals.scatter(self.output_data, output)

        # update state
        r = gen_sparse_ops._sparse_tensor_dense_mat_mul(
            self.A_indices, self.A, self.A_shape, state)

        with tf.control_dependencies([output]):
            state = r + tf.scatter_nd(self.offsets, input,
                                      self.state_sig.shape)
            # TODO: tensorflow does not yet support sparse_tensor_dense_add
            # on the GPU
            # state = gen_sparse_ops._sparse_tensor_dense_add(
            #     self.offsets, input, self.state_sig.shape, r)
        state.set_shape(self.state_sig.shape)

        signals.mark_gather(self.input_data)
        signals.mark_gather(self.state_sig)
        signals.scatter(self.state_sig, state)
Project: Deep-Fashion    Author: TomPyonsuke
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output dimension
    on `axis` is the latter; the input is never truncated.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
Project: TF-SegNet    Author: mathildor
def unpool_with_argmax(pool, ind, name = None, ksize=[1, 2, 2, 1]):

    """
       Unpooling layer after max_pool_with_argmax.
       Args:
           pool:   max pooled output tensor
           ind:      argmax indices
           ksize:     ksize is the same as for the pool
       Return:
           unpool:    unpooling tensor
    """
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
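
A hedged sketch of how this pairs with tf.nn.max_pool_with_argmax (the tensor names are illustrative):

# features: a [batch, height, width, channels] tensor
pooled, argmax = tf.nn.max_pool_with_argmax(
    features, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
unpooled = unpool_with_argmax(pooled, argmax, name='unpool_1')
# unpooled has the pre-pooling spatial size, with each max value restored
# to its original position and zeros everywhere else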
Project: tensor2tensor    Author: tensorflow
def scatter_blocks_2d(x, indices, shape):
  """scatters blocks from x into shape with indices."""
  x_shape = common_layers.shape_list(x)
  # [length, batch, heads, dim]
  x_t = tf.transpose(
      tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
  x_t_shape = common_layers.shape_list(x_t)
  indices = tf.reshape(indices, [-1, 1])
  scattered_x = tf.scatter_nd(indices, x_t, x_t_shape)
  scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3])
  return tf.reshape(scattered_x, shape)
Project: woipv    Author: Panaetius
def __unpool(self, updates, mask, ksize=[1, 2, 2, 1], output_shape=None, feature_count=None, name=''):
        with tf.variable_scope(name):
            mask = tf.cast(mask, tf.int32)
            input_shape = tf.shape(updates, out_type=tf.int32)
            # calculate the new output shape

            if feature_count is None:
                feature_count = input_shape[3]

            if output_shape is None:
                output_shape = (1, input_shape[1] * ksize[1], input_shape[2] * ksize[2], feature_count)

            output_shape = tf.cast(output_shape, tf.int32)

            # calculate indices for batch, height, width and feature maps
            one_like_mask = tf.cast(tf.ones_like(mask, dtype=tf.int16), tf.int32)
            batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
            batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int32), shape=batch_shape)
            b = one_like_mask * batch_range
            y = tf.floordiv(mask, output_shape[2] * output_shape[3])
            x = tf.mod(tf.floordiv(mask, output_shape[3]), output_shape[2]) #mask % (output_shape[2] * output_shape[3]) // output_shape[3]
            feature_range = tf.range(output_shape[3], dtype=tf.int32)
            f = one_like_mask * feature_range
            # transpose indices & reshape update values to one dimension
            updates_size = tf.size(updates)
            indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
            values = tf.reshape(updates, [updates_size])
            ret = tf.scatter_nd(indices, values, output_shape)
            return ret
Project: DAVIS-2016-Chanllege-Solution    Author: tangyuhao
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output dimension
    on `axis` is the latter; the input is never truncated.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
Project: segmentation    Author: fregu856
def max_unpool(inputs, pooling_indices, output_shape=None, k_size=[1, 2, 2, 1]):
    # NOTE! this function is based on the implementation by kwotsin in
    # https://github.com/kwotsin/TensorFlow-ENet

    # inputs has shape [batch_size, height, width, channels]

    # pooling_indices: pooling indices of the previously max_pooled layer

    # output_shape: what shape the returned tensor should have

    pooling_indices = tf.cast(pooling_indices, tf.int32)
    input_shape = tf.shape(inputs, out_type=tf.int32)

    one_like_pooling_indices = tf.ones_like(pooling_indices, dtype=tf.int32)
    batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
    batch_range = tf.reshape(tf.range(input_shape[0], dtype=tf.int32), shape=batch_shape)
    b = one_like_pooling_indices*batch_range
    y = pooling_indices//(output_shape[2]*output_shape[3])
    x = (pooling_indices//output_shape[3]) % output_shape[2]
    feature_range = tf.range(output_shape[3], dtype=tf.int32)
    f = one_like_pooling_indices*feature_range

    inputs_size = tf.size(inputs)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, inputs_size]))
    values = tf.reshape(inputs, [inputs_size])

    ret = tf.scatter_nd(indices, values, output_shape)

    return ret

# function for colorizing a label image:
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995
def _bilinear_interpolate(self,im, im_org, x, y):
        with tf.variable_scope('_interpolate'):
            # constants
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(self.height, 'float32')
            width_f = tf.cast(self.width, 'float32')
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
            # scale indices from [-1, 1] to [0, width/height]
            x = (x + 1.0)*(width_f) / 2.0
            y = (y + 1.0)*(height_f) / 2.0
            # do sampling
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = self.width
            dim1 = self.width*self.height
            base = self._repeat(tf.range(self.num_batch)*dim1, self.out_height*self.out_width, 'int32')
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = tf.expand_dims(base_y0 + x0, 1)
            idx_b = tf.expand_dims(base_y1 + x0, 1)
            idx_c = tf.expand_dims(base_y0 + x1, 1)
            idx_d = tf.expand_dims(base_y1 + x1, 1)
            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, self.num_channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.scatter_nd(idx_a, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ib = tf.scatter_nd(idx_b, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ic = tf.scatter_nd(idx_c, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Id = tf.scatter_nd(idx_d, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])

            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.scatter_nd(idx_a, tf.expand_dims(((x1_f-x) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wb = tf.scatter_nd(idx_b, tf.expand_dims(((x1_f-x) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wc = tf.scatter_nd(idx_c, tf.expand_dims(((x-x0_f) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wd = tf.scatter_nd(idx_d, tf.expand_dims(((x-x0_f) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])

            value_all = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            weight_all = tf.clip_by_value(tf.add_n([wa, wb, wc, wd]),1e-5,1e+10)
            flag = tf.less_equal(weight_all, 1e-5* tf.ones_like(weight_all))
            flag = tf.cast(flag, tf.float32)
            im_org = tf.reshape(im_org, [-1,self.num_channels])
            output = tf.add(tf.div(value_all, weight_all), tf.multiply(im_org, flag))
            return output
Project: SSD_tensorflow_VOC    Author: LevinJ
def test_scatter_nd_3():
    gt_bboxes = tf.constant([[0,0,1,2],[1,0,3,4],[100,100,105,102.5]])
    gt_labels = tf.constant([1,2,6])

    jaccard = tf.constant( [[ 0. ,     0.  ,    0.02,    0.15  ],[ 0. ,     0.3125 , 0.08,    0.    ],[ 0.5 ,    0. ,     0.  ,    0.    ]])
    gt_anchors_scores = tf.constant([0.0,0.,0.,0.])
    gt_anchors_labels = tf.constant([100,100,100,100])
    gt_anchors_bboxes=tf.constant([[100,100,105,105],[2,1,3,3.5],[0,0,10,10],[0.5,0.5,0.8,1.5]])

    max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)

    def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
        r = tf.less(i, tf.shape(gt_labels)[0])
        return r
    def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

        # update gt_anchors_labels
        updates = tf.reshape(gt_labels[i], [-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.reshape(tf.shape(gt_anchors_bboxes)[0],[-1])


        new_labels = tf.scatter_nd(indices, updates, shape)
        new_mask = tf.cast(new_labels, tf.bool)
        gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

        #update gt_anchors_bboxes
        updates = tf.reshape(gt_bboxes[i], [1,-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.shape(gt_anchors_bboxes)
        new_bboxes = tf.scatter_nd(indices, updates, shape)
        gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

        #update gt_anchors_scores
        updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.reshape(tf.shape(gt_anchors_bboxes)[0],[-1])
        new_scores = tf.scatter_nd(indices, updates, shape)
        gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)



        return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


    i = 0
    [i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores] = tf.while_loop(cond, body,[i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores])





    return gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores
Project: tefla    Author: openAGI
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        with tf.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                           [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledOutputTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""

                def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
                    """Concatenate outputs with auxiliary inputs, if they exist."""
                    if self._auxiliary_input_tas is None:
                        return outputs_

                    next_time = time + 1
                    auxiliary_inputs = nest.map_structure(
                        lambda ta: ta.read(next_time), self._auxiliary_input_tas)
                    if indices is not None:
                        auxiliary_inputs = tf.gather_nd(
                            auxiliary_inputs, indices)
                    return nest.map_structure(
                        lambda x, y: tf.concat((x, y), -1),
                        outputs_, auxiliary_inputs)

                if self._next_input_layer is None:
                    return tf.where(
                        sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
                        base_next_inputs)

                where_sampling = tf.cast(
                    tf.where(sample_ids), tf.int32)
                where_not_sampling = tf.cast(
                    tf.where(tf.logical_not(sample_ids)), tf.int32)
                outputs_sampling = tf.gather_nd(outputs, where_sampling)
                inputs_not_sampling = tf.gather_nd(base_next_inputs,
                                                   where_not_sampling)
                sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
                    self._next_input_layer(outputs_sampling), where_sampling)

                base_shape = tf.shape(base_next_inputs)
                return (tf.scatter_nd(indices=where_sampling,
                                      updates=sampled_next_inputs,
                                      shape=base_shape)
                        + tf.scatter_nd(indices=where_not_sampling,
                                        updates=inputs_not_sampling,
                                        shape=base_shape))

            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
Project: tensorflow-fcwta    Author: guoguo12
def _initialize_vars(self):
        """Sets up the training graph."""
        with tf.variable_scope(self.name) as scope:
            self.global_step = tf.get_variable(
                'global_step',
                shape=[],
                initializer=tf.zeros_initializer())
            self.input = tf.placeholder(tf.float32, shape=[None, self.input_dim])

        current = self.input
        for i in range(self.encode_layers - 1):
            current = self._relu_layer(current, self.input_dim, self.input_dim, i)
        self.encoded = self._relu_layer(current, self.input_dim, self.hidden_units, self.encode_layers - 1)

        # Make batch size the last dimension (for use with tf.nn.top_k)
        encoded_t = tf.transpose(self.encoded)

        # Compute the indices corresponding to the top k activations for each
        # neuron in the final encoder layer
        k = int(self.sparsity * self.batch_size)
        _, top_indices = tf.nn.top_k(encoded_t, k=k, sorted=False)

        # Transform top_indices, which contains rows of column indices, into
        # indices, a list of [row, column] pairs (for use with tf.scatter_nd)
        top_k_unstacked = tf.unstack(top_indices, axis=1)
        row_indices = [tf.range(self.hidden_units) for _ in range(k)]
        combined_columns = tf.transpose(tf.stack(_interleave(row_indices, top_k_unstacked)))
        indices = tf.reshape(combined_columns, [-1, 2])

        # Apply sparsity constraint
        updates = tf.ones(self.hidden_units * k)
        shape = tf.constant([self.hidden_units, self.batch_size])
        mask = tf.scatter_nd(indices, updates, shape)
        sparse_encoded = self.encoded * tf.transpose(mask)

        self.decoded = self._decode_layer(sparse_encoded)

        self.loss = tf.reduce_sum(tf.square(self.decoded - self.input))
        self.optimizer_op = self.optimizer(self.learning_rate).minimize(
            self.loss, self.global_step)

        self.saver = tf.train.Saver(tf.global_variables())
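
The top-k mask construction above can be sketched standalone (hypothetical sizes: hidden_units = 2, batch_size = 3, k = 1):

encoded_t = tf.constant([[0.1, 0.9, 0.3],
                         [0.7, 0.2, 0.5]])      # [hidden_units, batch_size]
_, top_indices = tf.nn.top_k(encoded_t, k=1, sorted=False)  # [[1], [0]]
rows = tf.range(2)                              # one row id per hidden unit
indices = tf.stack([rows, top_indices[:, 0]], axis=1)       # [[0, 1], [1, 0]]
mask = tf.scatter_nd(indices, tf.ones(2), tf.constant([2, 3]))
# mask == [[0, 1, 0], [1, 0, 0]]: keeps only each neuron's top activation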
Project: num-seq-recognizer    Author: gmlove
def build_export_output(net_out, H, W, max_number_length, C, threshold):
  B, threshold, sprs_output_box_count = max_number_length, threshold, 100
  net_out = tf.reshape(net_out, [H, W, B, -1])
  boxes, boxes_scores, classes_probs = net_out[:, :, :, :4], net_out[:, :, :, 4], net_out[:, :, :, 5:]
  row = np.concatenate([np.ones([1, W, B], dtype=np.float32) * i for i in range(H)], axis=0)
  col = np.concatenate([np.ones([H, 1, B], dtype=np.float32) * i for i in range(W)], axis=1)
  anchors_w = np.concatenate([np.ones([H, W, 1], dtype=np.float32) * anchors[2 * i + 0] for i in range(B)], axis=2)
  anchors_h = np.concatenate([np.ones([H, W, 1], dtype=np.float32) * anchors[2 * i + 1] for i in range(B)], axis=2)
  with ops.name_scope(None, 'calc_boxes_coordinates'):
    boxes = tf.concat([
      tf.expand_dims((tf.sigmoid(boxes[:, :, :, 0]) + col) / W, 3),
      tf.expand_dims((tf.sigmoid(boxes[:, :, :, 1]) + row) / H, 3),
      tf.expand_dims(tf.exp(boxes[:, :, :, 2]) * anchors_w / W, 3),
      tf.expand_dims(tf.exp(boxes[:, :, :, 3]) * anchors_h / H, 3),
    ], axis=3)
    boxes = tf.cast(boxes, tf.float32)

  with ops.name_scope(None, 'calc_boxes_scores'):
    boxes_scores = tf.sigmoid(boxes_scores)
    boxes_scores = tf.nn.softmax(classes_probs) * tf.expand_dims(boxes_scores, 3)
    boxes_scores = boxes_scores * tf.cast(boxes_scores > threshold, tf.float32)
    boxes_scores = tf.cast(boxes_scores, tf.float32)

  with ops.name_scope(None, 'non_max_suppression'):
    boxes = tf.reshape(boxes, [H * W * B, 4])
    sprs_boxes, sprs_boxes_scores = [], []
    for i in range(C):
      box_scores = tf.reshape(boxes_scores[:, :, :, i], [H * W * B])
      sprs_boxes_indices = tf.image.non_max_suppression(boxes, box_scores, sprs_output_box_count, iou_threshold=0.4)
      box_scores = box_scores * tf.scatter_nd(
        tf.reshape(sprs_boxes_indices, [-1, 1]),
        tf.ones(tf.shape(sprs_boxes_indices), dtype=tf.float32), [H * W * B])
      sprs_boxes_scores.append(tf.reshape(box_scores, [H * W * B, 1]))

  with ops.name_scope(None, 'select_boxes'):
    sprs_boxes_scores = tf.concat(sprs_boxes_scores, axis=1)
    classes = tf.argmax(sprs_boxes_scores, axis=1)
    classes_probs = tf.reduce_max(sprs_boxes_scores, axis=1)

    selected_box_mask = classes_probs > threshold

    selected_classes = tf.boolean_mask(classes, selected_box_mask)
    selected_boxes = tf.boolean_mask(boxes, selected_box_mask)
    selected_classes_probs = tf.boolean_mask(classes_probs, selected_box_mask)
    lefts = selected_boxes[:, 0] - selected_boxes[:, 2] / 2
    lefts = tf.where(lefts < 0, tf.zeros(tf.shape(lefts)), lefts)
    selected_boxes = tf.concat([
      tf.expand_dims(lefts, 1),
      tf.expand_dims(selected_boxes[:, 1] - selected_boxes[:, 3] / 2, 1),
      tf.expand_dims(selected_boxes[:, 2], 1),
      tf.expand_dims(selected_boxes[:, 3], 1),
    ], axis=1)
    selected_lefts = selected_boxes[:, 0]

  with ops.name_scope(None, 'sort_boxes'):
    sorted_lefts, sorted_lefts_indices = tf.nn.top_k(selected_lefts * -1, tf.shape(selected_lefts)[0])

    sorted_classes = tf.gather(selected_classes, sorted_lefts_indices)
    sorted_boxes = tf.gather(selected_boxes, sorted_lefts_indices)
    sorted_classes_probs = tf.gather(selected_classes_probs, sorted_lefts_indices)
  return sorted_lefts * -1, sorted_boxes, sorted_classes, sorted_classes_probs
Project: MobileNet    Author: Zehaos
def encode_annos(image, labels, bboxes, anchors, num_classes):
  """Encode annotations for losses computations.
  All the output tensors have a fixed shape (no dynamic dimensions).

  Args:
    image: 3-D with shape `[H, W, C]`.
    labels: 1-D with shape `[num_bounding_boxes]`.
    bboxes: 2-D with shape `[num_bounding_boxes, 4]`. Scaled.
    anchors: 4-D tensor with shape `[fea_h, fea_w, num_anchors, 4]`

  Returns:
    input_mask: 2-D with shape `[num_anchors, 1]`, indicate which anchor to be used to cal loss.
    labels_input: 2-D with shape `[num_anchors, num_classes]`, one hot encode for every anchor.
    box_delta_input: 2-D with shape `[num_anchors, 4]`.
    box_input: 2-D with shape '[num_anchors, 4]'.
  """
  anchors_shape = anchors.get_shape().as_list()
  fea_h = anchors_shape[0]
  fea_w = anchors_shape[1]
  num_anchors = anchors_shape[2] * fea_h * fea_w
  anchors = tf.reshape(anchors, [num_anchors, 4])  # reshape anchors

  # Cal iou, find the target anchor
  _anchors = xywh_to_yxyx(anchors)
  ious, indices = batch_iou_fast(_anchors, bboxes)
  indices = tf.reshape(indices, shape=[-1, 1])

  target_anchors = tf.gather(anchors, indices)
  target_anchors = tf.squeeze(target_anchors, axis=1)
  delta = batch_delta(yxyx_to_xywh_(bboxes), target_anchors)

  # bbox
  box_input = tf.scatter_nd(
    indices,
    bboxes,
    shape=[num_anchors, 4]
  )
  # label
  labels_input = tf.scatter_nd(
    indices,
    tf.one_hot(labels, num_classes),
    shape=[num_anchors, num_classes]
  )
  # anchor mask
  onehot_anchor = tf.one_hot(indices, num_anchors)
  onehot_anchor = tf.squeeze(onehot_anchor, axis=1)
  print("indices shape:", indices.get_shape().as_list())
  print("one hot anchors shape:", onehot_anchor.get_shape().as_list())
  input_mask = tf.reduce_sum(onehot_anchor, axis=0)
  input_mask = tf.reshape(input_mask, shape=[-1, 1])
  # delta
  box_delta_input = tf.scatter_nd(
    indices,
    delta,
    shape=[num_anchors, 4]
  )

  return input_mask, labels_input, box_delta_input, box_input


# TODO(shizehao): align anchor center to the grid
Project: windbag    Author: tongda
def create_loss(final_outputs, answers, answer_lens):
  '''
  Final outputs of the decoder may have a different length from the
  target answers, so we pad the outputs if they are shorter than the
  target answers, and pad the target answers if the outputs are longer.
  :param answer_lens:
   `Tensor` that representing length of answers
  :param final_outputs: the output of decoder
  :param answers: the target answers
  :return: tuple of loss_op and train_op
  '''
  with tf.variable_scope('loss') as scope:
    answers_t = tf.transpose(answers, (1, 0))
    print("target_tensor[0]: ", answers_t[0])
    print("final_outputs: ", final_outputs.get_shape())
    print("decoder_inputs_tensor: ", answers_t.get_shape())

    answer_max_len = tf.reduce_max(answer_lens)
    output_len = tf.shape(final_outputs)[0]

    def loss_with_padded_outputs():
      indexes = [[0, 1]]
      values = tf.expand_dims(answer_max_len - output_len - 1, axis=0)
      # because rank of final outputs tensor is 3, so the shape is (3, 2)
      shape = [3, 2]
      paddings = tf.scatter_nd(indexes, values, shape)
      padded_outputs = tf.pad(final_outputs, paddings)
      return tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=padded_outputs, labels=answers_t[1:])

    def loss_with_padded_answers():
      indexes = [[0, 1]]
      values = tf.expand_dims(output_len - answer_max_len + 1, axis=0)
      # because rank of answers tensor is 2, so the shape is (2, 2)
      shape = [2, 2]
      paddings = tf.scatter_nd(indexes, values, shape)
      padded_answer = tf.pad(answers_t, paddings)
      return tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=final_outputs, labels=padded_answer[1:])

    losses = tf.cond(output_len < answer_max_len, loss_with_padded_outputs, loss_with_padded_answers)

    losses_length = tf.shape(losses)[0]
    loss_mask = tf.sequence_mask(
      tf.to_int32(answer_lens), losses_length)

    losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
    # self.loss = tf.reduce_mean(losses)
    loss = tf.reduce_sum(losses) / tf.to_float(tf.reduce_sum(answer_lens - 1))

  return loss
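
The paddings trick used in both branches (scattering a single pad amount into an otherwise-zero [rank, 2] paddings matrix) can be sketched standalone with hypothetical values:

pad_amount = tf.constant([5])
paddings = tf.scatter_nd([[0, 1]], pad_amount, [3, 2])
# paddings == [[0, 5], [0, 0], [0, 0]]: when passed to tf.pad, this pads
# 5 trailing elements along dimension 0 of a rank-3 tensor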
Project: tensor2tensor    Author: tensorflow
def make_subseparable_kernel(kernel_size, input_channels, filters, separability,
                             kernel_initializer, kernel_regularizer):
  """Make a kernel to do subseparable convolution wiht  `tf.nn.conv2d`.

  Args:
    kernel_size: (height, width) tuple.
    input_channels: Number of input channels.
    filters: Number of output channels.
    separability: Integer denoting separability.
    kernel_initializer: Initializer to use for the kernel.
    kernel_regularizer: Regularizer to use for the kernel.

  Returns:
    A 4D tensor.
  """
  if separability == 1:
    # Non-separable convolution
    return tf.get_variable(
        "kernel",
        kernel_size + (input_channels, filters),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)

  elif separability == 0 or separability == -1:
    # Separable convolution
    # TODO(rshin): Check initialization is as expected, as these are not 4D.
    depthwise_kernel = tf.get_variable(
        "depthwise_kernel",
        kernel_size + (input_channels,),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)

    pointwise_kernel = tf.get_variable(
        "pointwise_kernel", (input_channels, filters),
        initializer=kernel_initializer,
        regularizer=kernel_regularizer)

    expanded_depthwise_kernel = tf.transpose(
        tf.scatter_nd(
            indices=tf.tile(
                tf.expand_dims(tf.range(0, input_channels), axis=1), [1, 2]),
            updates=tf.transpose(depthwise_kernel, (2, 0, 1)),
            shape=(input_channels, input_channels) + kernel_size), (2, 3, 0, 1))

    return tf.reshape(
        tf.matmul(
            tf.reshape(expanded_depthwise_kernel, (-1, input_channels)),
            pointwise_kernel), kernel_size + (input_channels, filters))

  elif separability >= 2:
    assert filters % separability == 0, (filters, separability)
    assert input_channels % separability == 0, (input_channels, separability)

    raise NotImplementedError

  elif separability <= -2:
    separability *= -1
    assert filters % separability == 0, (filters, separability)
    assert input_channels % separability == 0, (input_channels, separability)

    raise NotImplementedError
Project: keras-rcnn    Author: broadinstitute
def scatter_add_tensor(ref, indices, updates, name=None):
    """
    Adds sparse updates to a variable reference.

    This operation outputs ref after the update is done. This makes it
    easier to chain operations that need to use the reset value.

    Duplicate indices: if multiple indices reference the same location,
    their contributions add.

    Requires updates.shape = indices.shape + ref.shape[1:].

    :param ref: A Tensor. Must be one of the following types: float32,
    float64, int64, int32, uint8, uint16, int16, int8, complex64, complex128,
    qint8, quint8, qint32, half.

    :param indices: A Tensor. Must be one of the following types: int32,
    int64. A tensor of indices into the first dimension of ref.

    :param updates: A Tensor. Must have the same dtype as ref. A tensor of
    updated values to add to ref

    :param name: A name for the operation (optional).

    :return: Same as ref. Returned as a convenience for operations that want
    to use the updated values after the update is done.
    """
    with tensorflow.name_scope(name, 'scatter_add_tensor', [ref, indices, updates]) as scope:
        ref = tensorflow.convert_to_tensor(ref, name='ref')

        indices = tensorflow.convert_to_tensor(indices, name='indices')

        updates = tensorflow.convert_to_tensor(updates, name='updates')

        ref_shape = tensorflow.shape(ref, out_type=indices.dtype, name='ref_shape')

        scattered_updates = tensorflow.scatter_nd(indices, updates, ref_shape, name='scattered_updates')

        with tensorflow.control_dependencies([tensorflow.assert_equal(ref_shape, tensorflow.shape(scattered_updates, out_type=indices.dtype))]):
            output = tensorflow.add(ref, scattered_updates, name=scope)

        return output
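
A short usage sketch (hypothetical values) showing how duplicate indices accumulate:

ref = tensorflow.zeros([4, 2])
indices = tensorflow.constant([[1], [1], [3]])
updates = tensorflow.constant([[1.0, 1.0], [2.0, 2.0], [5.0, 5.0]])
out = scatter_add_tensor(ref, indices, updates)
# out == [[0.0, 0.0], [3.0, 3.0], [0.0, 0.0], [5.0, 5.0]]
# the two updates aimed at row 1 add together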