Python tensorflow module: ceil() code examples

The following 15 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.ceil().
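Before the project snippets, a minimal sketch of the op itself: tf.ceil rounds each element of a floating-point tensor up to the nearest integer and keeps the float dtype. The example below assumes TensorFlow 1.x (in 2.x the op lives at tf.math.ceil).

import tensorflow as tf

x = tf.constant([-1.7, -0.2, 0.2, 1.5])
y = tf.ceil(x)  # element-wise ceiling; the result is still float32

with tf.Session() as sess:
    print(sess.run(y))  # [-1. -0.  1.  2.]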

Project: Super_TF    Author: Dhruv-Mohan
def Construct_Accuracy_op(self):
        with tf.name_scope('accuracy'):
            if self.model_dict['Model_Type'] == 'Classification':
                correct_prediction = tf.equal(tf.argmax(self.model_dict['Output'], 1), tf.argmax(self.model_dict['Output_ph'], 1))
                false_images = tf.boolean_mask(self.model_dict['Reshaped_input'], tf.logical_not(correct_prediction))
                tf.summary.image(name='False images', tensor=false_images)
                self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                tf.summary.scalar('accuracy', self.accuracy)
                self.accuracy_op = True

            elif self.model_dict['Model_Type'] == 'Segmentation':
                probs = tf.reshape(tf.sigmoid(self.model_dict['Output']), shape=[self.kwargs['Batch_size'], -1])
                lab = tf.reshape(self.model_dict['Output_ph'], shape=[self.kwargs['Batch_size'], -1])
                probs = tf.ceil(probs - 0.5 + 1e-10)  # binarize at 0.5: values above 0.5 become 1.0, the rest 0.0
                intersection = tf.reduce_sum(probs * lab, axis=1)
                union = tf.reduce_sum(probs, 1) + tf.reduce_sum(lab, 1)
                tf.summary.image(name='Input images', tensor=self.model_dict['Reshaped_input'])
                tf.summary.image(name='Mask', tensor=tf.reshape(self.model_dict['Output_ph'], [-1, self.kwargs['Image_width'], self.kwargs['Image_height'], 1]))
                tf.summary.image(name='Weight', tensor=tf.reshape(self.model_dict['Weight_ph'], [-1, self.kwargs['Image_width'], self.kwargs['Image_height'], 1]))
                tf.summary.image(name='Output', tensor=tf.sigmoid(self.model_dict['Output']))
                self.accuracy = tf.reduce_mean(2 * intersection / union)  # mean Dice coefficient
                tf.summary.scalar('accuracy', self.accuracy)
                self.accuracy_op = True

            elif self.model_dict['Model_Type'] == 'Sequence':

                correct_prediction = tf.equal(tf.argmax(self.model_dict['Output'], 1), tf.reshape(tf.cast(tf.reshape(self.model_dict['Output_ph'], shape=[-1]), tf.int64), [-1]))
                pre_acc = tf.to_float(correct_prediction) * tf.to_float(tf.reshape(self.model_dict['Mask'], [-1]))
                pre_acc = tf.reduce_sum(pre_acc)
                self.accuracy = tf.div(pre_acc, tf.maximum(1.0, tf.reduce_sum(tf.to_float(tf.reshape(self.model_dict['Mask'], [-1])))))
                self.accuracy_op = True
                tf.summary.scalar('accuracy', self.accuracy)
                self.out_op = tf.argmax(self.model_dict['Output'], 1)
            #tf.cond(self.accuracy > 0.92, lambda: tf.summary.image(name='False images', tensor=false_images), lambda: tf.summary.tensor_summary(name='correct_predictions', tensor=correct_prediction))
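The segmentation branch above uses tf.ceil to binarize sigmoid probabilities at 0.5 before computing the Dice overlap. A minimal, self-contained sketch of the same trick, with hypothetical logits (TensorFlow 1.x assumed):

import tensorflow as tf

logits = tf.constant([[2.0, -1.0, -3.0]])  # hypothetical raw network outputs
probs = tf.sigmoid(logits)                 # values in (0, 1)
binary = tf.ceil(probs - 0.5 + 1e-10)      # 1.0 where prob > 0.5, (signed) zero otherwise

with tf.Session() as sess:
    print(sess.run(binary))  # [[ 1. -0. -0.]]

The 1e-10 term nudges a probability of exactly 0.5 up to 1.0 rather than leaving it at ceil(0.0) = 0.0.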
Project: lsdc    Author: febert
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
Project: TF-Speech-Recognition    Author: ZhishengWang
def _cal_seq_len(self, seq_len):
        seq_len = tf.ceil(tf.to_float(seq_len) / 2)  # length after the first stride-2 layer
        seq_len = tf.ceil(seq_len / 2)               # length after the second stride-2 layer
        #seq_len = tf.ceil(seq_len / 2)
        return tf.to_int32(seq_len)
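The same arithmetic in plain Python shows why ceil rather than floor is used: odd-length inputs keep their trailing frame instead of dropping it.

import math

for n in (16, 17):
    l1 = math.ceil(n / 2)   # length after the first stride-2 layer
    l2 = math.ceil(l1 / 2)  # length after the second
    print(n, l1, l2)        # 16 -> 8 -> 4, 17 -> 9 -> 5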
Project: odin    Author: imito
def _apply(self, X):
    axes = self.axes
    ndims = X.get_shape().ndims
    if is_string(axes) and axes.lower() == 'auto':
      if ndims == 3:
        axes = (1,)
      elif ndims == 4:
        axes = (1, 2)
      elif ndims == 5:
        axes = (1, 2, 3)
    X = K.upsample(X, scale=self.size, axes=axes, method=self.mode)
    # ====== check output_shape ====== #
    output_shape = self.output_shape
    if output_shape is not None:
      # do padding if necessary
      paddings = [[0, 0] if i is None or o is None or i >= o else
                  [tf.cast(tf.ceil((o - i) / 2), 'int32'),
                   tf.cast(tf.floor((o - i) / 2), 'int32')]
                  for i, o in zip(X.get_shape().as_list(), output_shape)]
      if not all(i == [0, 0] for i in paddings):
        X = tf.pad(X, paddings=paddings, mode='CONSTANT')
      # do slice if necessary
      slices = [slice(tf.cast(tf.floor((i - o) / 2), 'int32'),
                      tf.cast(-tf.ceil((i - o) / 2), 'int32'), None)
                if i > o else slice(None)
                for i, o in zip(X.get_shape().as_list(), output_shape)]
      if any(s != slice(None) for s in slices):  # compare by value; 'is not' is always True against a fresh slice object
        X = X[slices]
      K.set_shape(X, tuple([i if is_number(i) else None
                            for i in output_shape]))
    return X
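The ceil/floor pair above splits an odd padding (or cropping) amount asymmetrically, putting the larger half first. A plain-Python check with hypothetical sizes:

import math

i, o = 11, 16                    # current size and target size along one axis
before = math.ceil((o - i) / 2)  # 3 rows of padding in front
after = math.floor((o - i) / 2)  # 2 rows behind
assert before + after == o - i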
Project: neuralmonkey    Author: ufal
def bidirectional_rnn(self) -> Tuple[Tuple[tf.Tensor, tf.Tensor],
                                         Tuple[tf.Tensor, tf.Tensor]]:
        # BiRNN Network
        fw_cell, bw_cell = self.rnn_cells()  # type: RNNCellTuple
        seq_lens = tf.ceil(tf.divide(
            self.input_sequence.lengths,
            self.segment_size))
        seq_lens = tf.cast(seq_lens, tf.int32)
        return tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, self.highway_layer,
            sequence_length=seq_lens,
            dtype=tf.float32)
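Here tf.ceil(lengths / segment_size) is the usual count-of-segments formula: a partial trailing segment still counts as a full RNN step, e.g. a length-10 sequence with segment_size 4 gives ceil(10/4) = 3 valid steps.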
Project: tfdeploy    Author: riga
def test_Ceil(self):
        t = tf.ceil(self.random(4, 3) - 0.5)
        self.check(t)
Project: inverse-compositional-STN    Author: ericlin79119
def transformCropImage(opt,imageFull,pMtrx):
    with tf.name_scope("transformImage"):
        refMtrx = tf.tile(tf.expand_dims(opt.bboxRefMtrx,axis=0),[opt.batchSize,1,1])
        transMtrx = tf.matmul(refMtrx,pMtrx)
        # warp the canonical coordinates
        X,Y = np.meshgrid(np.linspace(-1,1,opt.W),np.linspace(-1,1,opt.H))
        X,Y = X.flatten(),Y.flatten()
        XYhom = np.stack([X,Y,np.ones_like(X)],axis=1).T
        XYhom = np.tile(XYhom,[opt.batchSize,1,1]).astype(np.float32)
        XYwarpHom = tf.matmul(transMtrx,XYhom)
        XwarpHom,YwarpHom,ZwarpHom = tf.unstack(XYwarpHom,axis=1)
        Xwarp = tf.reshape(XwarpHom/(ZwarpHom+1e-8),[opt.batchSize,opt.H,opt.W])
        Ywarp = tf.reshape(YwarpHom/(ZwarpHom+1e-8),[opt.batchSize,opt.H,opt.W])
        # get the integer sampling coordinates
        Xfloor,Xceil = tf.floor(Xwarp),tf.ceil(Xwarp)
        Yfloor,Yceil = tf.floor(Ywarp),tf.ceil(Ywarp)
        XfloorInt,XceilInt = tf.to_int32(Xfloor),tf.to_int32(Xceil)
        YfloorInt,YceilInt = tf.to_int32(Yfloor),tf.to_int32(Yceil)
        imageIdx = np.tile(np.arange(opt.batchSize).reshape([opt.batchSize,1,1]),[1,opt.H,opt.W])
        imageVec = tf.reshape(imageFull,[-1,int(imageFull.shape[-1])])
        imageVecOut = tf.concat([imageVec,tf.zeros([1,int(imageFull.shape[-1])])],axis=0)
        idxUL = (imageIdx*opt.fullH+YfloorInt)*opt.fullW+XfloorInt
        idxUR = (imageIdx*opt.fullH+YfloorInt)*opt.fullW+XceilInt
        idxBL = (imageIdx*opt.fullH+YceilInt)*opt.fullW+XfloorInt
        idxBR = (imageIdx*opt.fullH+YceilInt)*opt.fullW+XceilInt
        idxOutside = tf.fill([opt.batchSize,opt.H,opt.W],opt.batchSize*opt.fullH*opt.fullW)
        def insideImage(Xint,Yint):
            return (Xint>=0)&(Xint<opt.fullW)&(Yint>=0)&(Yint<opt.fullH)
        idxUL = tf.where(insideImage(XfloorInt,YfloorInt),idxUL,idxOutside)
        idxUR = tf.where(insideImage(XceilInt,YfloorInt),idxUR,idxOutside)
        idxBL = tf.where(insideImage(XfloorInt,YceilInt),idxBL,idxOutside)
        idxBR = tf.where(insideImage(XceilInt,YceilInt),idxBR,idxOutside)
        # bilinear interpolation
        Xratio = tf.reshape(Xwarp-Xfloor,[opt.batchSize,opt.H,opt.W,1])
        Yratio = tf.reshape(Ywarp-Yfloor,[opt.batchSize,opt.H,opt.W,1])
        imageUL = tf.to_float(tf.gather(imageVecOut,idxUL))*(1-Xratio)*(1-Yratio)
        imageUR = tf.to_float(tf.gather(imageVecOut,idxUR))*(Xratio)*(1-Yratio)
        imageBL = tf.to_float(tf.gather(imageVecOut,idxBL))*(1-Xratio)*(Yratio)
        imageBR = tf.to_float(tf.gather(imageVecOut,idxBR))*(Xratio)*(Yratio)
        imageWarp = imageUL+imageUR+imageBL+imageBR
    return imageWarp
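tf.floor and tf.ceil pick out the four integer neighbors of each warped sampling point, and the fractional remainders become the bilinear weights. The neighborhood logic for a single point, with hypothetical coordinates:

import math

x, y = 2.3, 5.8                       # warped, non-integer sampling point
x0, x1 = math.floor(x), math.ceil(x)  # 2, 3
y0, y1 = math.floor(y), math.ceil(y)  # 5, 6
ax, ay = x - x0, y - y0               # bilinear weights 0.3 and 0.8
# value = (1-ax)*(1-ay)*I[y0,x0] + ax*(1-ay)*I[y0,x1]
#       + (1-ax)*ay*I[y1,x0] + ax*ay*I[y1,x1]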
Project: inverse-compositional-STN    Author: ericlin79119
def transformImage(opt,image,pMtrx):
    with tf.name_scope("transformImage"):
        refMtrx = tf.tile(tf.expand_dims(opt.refMtrx,axis=0),[opt.batchSize,1,1])
        transMtrx = tf.matmul(refMtrx,pMtrx)
        # warp the canonical coordinates
        X,Y = np.meshgrid(np.linspace(-1,1,opt.W),np.linspace(-1,1,opt.H))
        X,Y = X.flatten(),Y.flatten()
        XYhom = np.stack([X,Y,np.ones_like(X)],axis=1).T
        XYhom = np.tile(XYhom,[opt.batchSize,1,1]).astype(np.float32)
        XYwarpHom = tf.matmul(transMtrx,XYhom)
        XwarpHom,YwarpHom,ZwarpHom = tf.unstack(XYwarpHom,axis=1)
        Xwarp = tf.reshape(XwarpHom/(ZwarpHom+1e-8),[opt.batchSize,opt.H,opt.W])
        Ywarp = tf.reshape(YwarpHom/(ZwarpHom+1e-8),[opt.batchSize,opt.H,opt.W])
        # get the integer sampling coordinates
        Xfloor,Xceil = tf.floor(Xwarp),tf.ceil(Xwarp)
        Yfloor,Yceil = tf.floor(Ywarp),tf.ceil(Ywarp)
        XfloorInt,XceilInt = tf.to_int32(Xfloor),tf.to_int32(Xceil)
        YfloorInt,YceilInt = tf.to_int32(Yfloor),tf.to_int32(Yceil)
        imageIdx = np.tile(np.arange(opt.batchSize).reshape([opt.batchSize,1,1]),[1,opt.H,opt.W])
        imageVec = tf.reshape(image,[-1,int(image.shape[-1])])
        imageVecOut = tf.concat([imageVec,tf.zeros([1,int(image.shape[-1])])],axis=0)
        idxUL = (imageIdx*opt.H+YfloorInt)*opt.W+XfloorInt
        idxUR = (imageIdx*opt.H+YfloorInt)*opt.W+XceilInt
        idxBL = (imageIdx*opt.H+YceilInt)*opt.W+XfloorInt
        idxBR = (imageIdx*opt.H+YceilInt)*opt.W+XceilInt
        idxOutside = tf.fill([opt.batchSize,opt.H,opt.W],opt.batchSize*opt.H*opt.W)
        def insideImage(Xint,Yint):
            return (Xint>=0)&(Xint<opt.W)&(Yint>=0)&(Yint<opt.H)
        idxUL = tf.where(insideImage(XfloorInt,YfloorInt),idxUL,idxOutside)
        idxUR = tf.where(insideImage(XceilInt,YfloorInt),idxUR,idxOutside)
        idxBL = tf.where(insideImage(XfloorInt,YceilInt),idxBL,idxOutside)
        idxBR = tf.where(insideImage(XceilInt,YceilInt),idxBR,idxOutside)
        # bilinear interpolation
        Xratio = tf.reshape(Xwarp-Xfloor,[opt.batchSize,opt.H,opt.W,1])
        Yratio = tf.reshape(Ywarp-Yfloor,[opt.batchSize,opt.H,opt.W,1])
        imageUL = tf.to_float(tf.gather(imageVecOut,idxUL))*(1-Xratio)*(1-Yratio)
        imageUR = tf.to_float(tf.gather(imageVecOut,idxUR))*(Xratio)*(1-Yratio)
        imageBL = tf.to_float(tf.gather(imageVecOut,idxBL))*(1-Xratio)*(Yratio)
        imageBR = tf.to_float(tf.gather(imageVecOut,idxBR))*(Xratio)*(Yratio)
        imageWarp = imageUL+imageUR+imageBL+imageBR
    return imageWarp
Project: cancer    Author: yancz1989
def interp(w, i, channel_dim):
  '''
  Input:
    w: A 4D block tensor of shape (n, h, w, c)
    i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
      each having type (int, float, float)

    The 4D block represents a batch of 3D image feature volumes with c channels.
    The input i is a list of points to index into w via interpolation. Direct
    indexing is not possible due to y_1 and z_1 being float values.
  Output:
    A list of the values: [
      w[x_1, y_1, z_1, :]
      w[x_2, y_2, z_2, :]
      ...
      w[x_k, y_k, z_k, :]
    ]
    of the same length == len(i)
  '''
  w_as_vector = tf.reshape(w, [-1, channel_dim]) # gather expects w to be 1-d
  upper_l = tf.to_int32(tf.concat(axis=1, values=[i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
  upper_r = tf.to_int32(tf.concat(axis=1, values=[i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
  lower_l = tf.to_int32(tf.concat(axis=1, values=[i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
  lower_r = tf.to_int32(tf.concat(axis=1, values=[i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))

  upper_l_idx = to_idx(upper_l, tf.shape(w))
  upper_r_idx = to_idx(upper_r, tf.shape(w))
  lower_l_idx = to_idx(lower_l, tf.shape(w))
  lower_r_idx = to_idx(lower_r, tf.shape(w))

  upper_l_value = tf.gather(w_as_vector, upper_l_idx)
  upper_r_value = tf.gather(w_as_vector, upper_r_idx)
  lower_l_value = tf.gather(w_as_vector, lower_l_idx)
  lower_r_value = tf.gather(w_as_vector, lower_r_idx)

  alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
  alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)

  upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
  lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
  value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
  return value
Project: tefla    Author: openAGI
def max_pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.

    Used by spatial_pyramid_pool. Refer to appendix A in [1].

    Args:
        inputs: A 4D Tensor (B, H, W, C)
        output_size: The output size of the pooling operation.
        mode: The pooling mode {max, avg}

    Returns:
        A list of tensors, for each output bin.
        The list contains output_size * output_size elements, where
        each element is a Tensor (B, C).

    """
    inputs_shape = tf.shape(inputs)
    h = tf.cast(tf.gather(inputs_shape, 1), tf.int32)
    w = tf.cast(tf.gather(inputs_shape, 2), tf.int32)

    if mode == 'max':
        pooling_op = tf.reduce_max
    elif mode == 'avg':
        pooling_op = tf.reduce_mean
    else:
        msg = "Mode must be either 'max' or 'avg'. Got '{0}'"
        raise ValueError(msg.format(mode))

    result = []
    n = output_size
    for row in range(output_size):
        for col in range(output_size):
            # start_h = floor(row / n * h)
            start_h = tf.cast(
                tf.floor(tf.multiply(tf.divide(row, n), tf.cast(h, tf.float32))), tf.int32)
            # end_h = ceil((row + 1) / n * h)
            end_h = tf.cast(
                tf.ceil(tf.multiply(tf.divide((row + 1), n), tf.cast(h, tf.float32))), tf.int32)
            # start_w = floor(col / n * w)
            start_w = tf.cast(
                tf.floor(tf.multiply(tf.divide(col, n), tf.cast(w, tf.float32))), tf.int32)
            # end_w = ceil((col + 1) / n * w)
            end_w = tf.cast(
                tf.ceil(tf.multiply(tf.divide((col + 1), n), tf.cast(w, tf.float32))), tf.int32)
            pooling_region = inputs[:, start_h:end_h, start_w:end_w, :]
            pool_result = pooling_op(pooling_region, axis=(1, 2))
            result.append(pool_result)
    return result
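The floor/ceil pairing makes adjacent bins overlap slightly rather than leave gaps when the input size is not divisible by output_size. A plain-Python check of the bin boundaries with hypothetical sizes:

import math

h, n = 5, 2                             # input height, number of output bins
for row in range(n):
    start = math.floor(row / n * h)     # floor: bins may overlap by a row
    end = math.ceil((row + 1) / n * h)  # ceil: no row is ever skipped
    print(row, start, end)              # 0: [0, 3), 1: [2, 5)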
Project: tefla    Author: openAGI
def spatial_pyramid_pool(inputs, dimensions=[2, 1], mode='max', implementation='kaiming'):
    """
    Performs spatial pyramid pooling (SPP) over the input.
    It will turn a 2D input of arbitrary size into an output of fixed
    dimension.
    Hence, the convolutional part of a DNN can be connected to a dense part
    with a fixed number of nodes even if the dimensions of the input
    image are unknown.

    The pooling is performed over :math:`l` pooling levels.
    Each pooling level :math:`i` will create :math:`M_i` output features.
    :math:`M_i` is given by :math:`n_i * n_i`, with :math:`n_i` as the number
    of pooling operations per dimension level :math:`i`.

    The length of the parameter dimensions is the level of the spatial pyramid.

    Args:
        inputs: A 4D Tensor (B, H, W, C).
        dimensions: The list of :math:`n_i`'s that define the output dimension
        of each pooling level :math:`i`. The length of dimensions is the level of
        the spatial pyramid.
        mode: Pooling mode 'max' or 'avg'.
        implementation: The implementation to use, either 'kaiming' or 'fast'.
        'kaiming' is the original implementation from the paper and supports
        variable input sizes, which 'fast' does not.

    Returns:
        A fixed length vector representing the inputs.

    Notes:
        SPP should be inserted between the convolutional part of a DNN and its
        dense part. Convolutions can be used for arbitrary input dimensions, but
        the size of their output will depend on their input dimensions.
        Connecting the output of the convolutional to the dense part then
        usually forces us to fix the dimensions of the network's input.
        The spatial pyramid pooling layer, however, allows us to leave
        the network input dimensions arbitrary.
        The advantage over a global pooling layer is the added robustness
        against object deformations due to the pooling on different scales.

    """
    pool_list = []
    if implementation == 'kaiming':
        for pool_dim in dimensions:
            pool_list += max_pool_2d_nxn_regions(inputs, pool_dim, mode)
    else:
        shape = inputs.get_shape().as_list()
        for d in dimensions:
            h = shape[1]
            w = shape[2]
            ph = np.ceil(h * 1.0 / d).astype(np.int32)
            pw = np.ceil(w * 1.0 / d).astype(np.int32)
            sh = np.floor(h * 1.0 / d + 1).astype(np.int32)
            sw = np.floor(w * 1.0 / d + 1).astype(np.int32)
            pool_result = tf.nn.max_pool(inputs,
                                         ksize=[1, ph, pw, 1],
                                         strides=[1, sh, sw, 1],
                                         padding='SAME')
            pool_list.append(tf.reshape(
                pool_result, [tf.shape(inputs)[0], -1]))
    return tf.concat(pool_list, 1)  # TF 1.x argument order: values first, then axis
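For example, with dimensions=[2, 1] the result always has 2*2 + 1*1 = 5 bins per channel, regardless of the input height and width, which is what allows a dense layer of fixed size to follow.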
Project: TensorBoxPy3    Author: SMH17
def interp(w, i, channel_dim):
    '''
    Input:
        w: A 4D block tensor of shape (n, h, w, c)
        i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
            each having type (int, float, float)

        The 4D block represents a batch of 3D image feature volumes with c channels.
        The input i is a list of points to index into w via interpolation. Direct
        indexing is not possible due to y_1 and z_1 being float values.
    Output:
        A list of the values: [
            w[x_1, y_1, z_1, :]
            w[x_2, y_2, z_2, :]
            ...
            w[x_k, y_k, z_k, :]
        ]
        of the same length == len(i)
    '''
    w_as_vector = tf.reshape(w, [-1, channel_dim]) # gather expects w to be 1-d
    upper_l = tf.to_int32(tf_concat(axis=1, values=[i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    upper_r = tf.to_int32(tf_concat(axis=1, values=[i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
    lower_l = tf.to_int32(tf_concat(axis=1, values=[i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    lower_r = tf.to_int32(tf_concat(axis=1, values=[i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))

    upper_l_idx = to_idx(upper_l, tf.shape(w))
    upper_r_idx = to_idx(upper_r, tf.shape(w))
    lower_l_idx = to_idx(lower_l, tf.shape(w))
    lower_r_idx = to_idx(lower_r, tf.shape(w))

    upper_l_value = tf.gather(w_as_vector, upper_l_idx)
    upper_r_value = tf.gather(w_as_vector, upper_r_idx)
    lower_l_value = tf.gather(w_as_vector, lower_l_idx)
    lower_r_value = tf.gather(w_as_vector, lower_r_idx)

    alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
    alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)

    upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
    lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
    value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
    return value
Project: deep-group-happiness    Author: acerekovic
def interp(w, i, channel_dim):
    '''
    Input:
        w: A 4D block tensor of shape (n, h, w, c)
        i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
            each having type (int, float, float)

        The 4D block represents a batch of 3D image feature volumes with c channels.
        The input i is a list of points to index into w via interpolation. Direct
        indexing is not possible due to y_1 and z_1 being float values.
    Output:
        A list of the values: [
            w[x_1, y_1, z_1, :]
            w[x_2, y_2, z_2, :]
            ...
            w[x_k, y_k, z_k, :]
        ]
        of the same length == len(i)
    '''
    w_as_vector = tf.reshape(w, [-1, channel_dim])  # gather expects w to be 1-d
    upper_l = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    upper_r = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
    lower_l = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    lower_r = tf.to_int32(tf.concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))

    upper_l_idx = to_idx(upper_l, tf.shape(w))
    upper_r_idx = to_idx(upper_r, tf.shape(w))
    lower_l_idx = to_idx(lower_l, tf.shape(w))
    lower_r_idx = to_idx(lower_r, tf.shape(w))

    upper_l_value = tf.gather(w_as_vector, upper_l_idx)
    upper_r_value = tf.gather(w_as_vector, upper_r_idx)
    lower_l_value = tf.gather(w_as_vector, lower_l_idx)
    lower_r_value = tf.gather(w_as_vector, lower_r_idx)

    alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
    alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)

    upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
    lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
    value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
    return value
Project: TensorBox    Author: Russell91
def interp(w, i, channel_dim):
    '''
    Input:
        w: A 4D block tensor of shape (n, h, w, c)
        i: A list of 3-tuples [(x_1, y_1, z_1), (x_2, y_2, z_2), ...],
            each having type (int, float, float)

        The 4D block represents a batch of 3D image feature volumes with c channels.
        The input i is a list of points to index into w via interpolation. Direct
        indexing is not possible due to y_1 and z_1 being float values.
    Output:
        A list of the values: [
            w[x_1, y_1, z_1, :]
            w[x_2, y_2, z_2, :]
            ...
            w[x_k, y_k, z_k, :]
        ]
        of the same length == len(i)
    '''
    w_as_vector = tf.reshape(w, [-1, channel_dim]) # gather expects w to be 1-d
    upper_l = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    upper_r = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.floor(i[:, 1:2]), tf.ceil(i[:, 2:3])]))
    lower_l = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.floor(i[:, 2:3])]))
    lower_r = tf.to_int32(tf_concat(1, [i[:, 0:1], tf.ceil(i[:, 1:2]), tf.ceil(i[:, 2:3])]))

    upper_l_idx = to_idx(upper_l, tf.shape(w))
    upper_r_idx = to_idx(upper_r, tf.shape(w))
    lower_l_idx = to_idx(lower_l, tf.shape(w))
    lower_r_idx = to_idx(lower_r, tf.shape(w))

    upper_l_value = tf.gather(w_as_vector, upper_l_idx)
    upper_r_value = tf.gather(w_as_vector, upper_r_idx)
    lower_l_value = tf.gather(w_as_vector, lower_l_idx)
    lower_r_value = tf.gather(w_as_vector, lower_r_idx)

    alpha_lr = tf.expand_dims(i[:, 2] - tf.floor(i[:, 2]), 1)
    alpha_ud = tf.expand_dims(i[:, 1] - tf.floor(i[:, 1]), 1)

    upper_value = (1 - alpha_lr) * upper_l_value + (alpha_lr) * upper_r_value
    lower_value = (1 - alpha_lr) * lower_l_value + (alpha_lr) * lower_r_value
    value = (1 - alpha_ud) * upper_value + (alpha_ud) * lower_value
    return value
Project: tensorflow    Author: luyishisi
def pad_to_multiple(tensor, multiple):
  """Returns the tensor zero padded to the specified multiple.

  Appends 0s to the end of the first and second dimension (height and width) of
  the tensor until both dimensions are a multiple of the input argument
  'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
  multiple of 4, PadToMultiple will append 0s so that the resulting tensor will
  be of shape [1, 4, 8, 1].

  Args:
    tensor: rank 4 float32 tensor, where
            tensor -> [batch_size, height, width, channels].
    multiple: the multiple to pad to.

  Returns:
    padded_tensor: the tensor zero padded to the specified multiple.
  """
  tensor_shape = tensor.get_shape()
  batch_size = static_shape.get_batch_size(tensor_shape)
  tensor_height = static_shape.get_height(tensor_shape)
  tensor_width = static_shape.get_width(tensor_shape)
  tensor_depth = static_shape.get_depth(tensor_shape)

  if batch_size is None:
    batch_size = tf.shape(tensor)[0]

  if tensor_height is None:
    tensor_height = tf.shape(tensor)[1]
    padded_tensor_height = tf.to_int32(
        tf.ceil(tf.to_float(tensor_height) / tf.to_float(multiple))) * multiple
  else:
    padded_tensor_height = int(
        math.ceil(float(tensor_height) / multiple) * multiple)

  if tensor_width is None:
    tensor_width = tf.shape(tensor)[2]
    padded_tensor_width = tf.to_int32(
        tf.ceil(tf.to_float(tensor_width) / tf.to_float(multiple))) * multiple
  else:
    padded_tensor_width = int(
        math.ceil(float(tensor_width) / multiple) * multiple)

  if tensor_depth is None:
    tensor_depth = tf.shape(tensor)[3]

  # Use tf.concat instead of tf.pad to preserve static shape
  height_pad = tf.zeros([
      batch_size, padded_tensor_height - tensor_height, tensor_width,
      tensor_depth
  ])
  padded_tensor = tf.concat([tensor, height_pad], 1)
  width_pad = tf.zeros([
      batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
      tensor_depth
  ])
  padded_tensor = tf.concat([padded_tensor, width_pad], 2)

  return padded_tensor
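A usage sketch of the function above, reproducing the docstring's example. It assumes pad_to_multiple and the static_shape helper it calls (both from the TensorFlow object detection utilities) are in scope:

import tensorflow as tf

image = tf.zeros([1, 3, 5, 1])
padded = pad_to_multiple(image, 4)

with tf.Session() as sess:
    print(sess.run(tf.shape(padded)))  # [1 4 8 1]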