Python tensorflow module: batch_to_space() example source code

The following 6 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.batch_to_space().
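For orientation, here is a minimal sketch of what the op itself does, using the TF 1.x signature tf.batch_to_space(input, crops, block_size) that all of the snippets below rely on (the values are illustrative):

import tensorflow as tf

# Four 1x1 "images" stacked in the batch dimension...
x = tf.reshape(tf.range(4, dtype=tf.float32), [4, 1, 1, 1])
# ...become one 2x2 image: batch_to_space moves a block_size x block_size
# factor out of the batch dimension and into height and width.
y = tf.batch_to_space(x, crops=[[0, 0], [0, 0]], block_size=2)
# y has shape (1, 2, 2, 1) and holds [[0, 1], [2, 3]].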

Project: opt-mmd    Author: dougalsutherland
def imageRearrange(self, image, block=4):
        # Keep the first block*block images of the batch.
        image = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        # Fold the batch into the spatial dims:
        # (block*block, H, W, C) -> (1, H*block, W*block, C), pixel-interleaved.
        x1 = tf.batch_to_space(image, [[0, 0], [0, 0]], block)
        # Undo the interleaving so each image occupies a contiguous tile
        # of the block x block grid.
        image_r = tf.reshape(tf.transpose(tf.reshape(x1,
            [self.output_size, block, self.output_size, block, self.c_dim]),
            [1, 0, 3, 2, 4]),
            [1, self.output_size * block, self.output_size * block, self.c_dim])
        return image_r
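For intuition, a standalone version of the same rearrangement with concrete shapes (a minimal sketch; the 32x32 RGB images and 4x4 grid are illustrative assumptions, not values from the project):

import tensorflow as tf

def tile_batch(images, block, size, channels):
    # (block*block, size, size, channels) -> (1, size*block, size*block, channels)
    images = tf.slice(images, [0, 0, 0, 0], [block * block, -1, -1, -1])
    x = tf.batch_to_space(images, [[0, 0], [0, 0]], block)
    x = tf.reshape(x, [size, block, size, block, channels])
    x = tf.transpose(x, [1, 0, 3, 2, 4])
    return tf.reshape(x, [1, size * block, size * block, channels])

batch = tf.zeros([16, 32, 32, 3])
grid = tile_batch(batch, block=4, size=32, channels=3)  # shape (1, 128, 128, 3)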
Project: tf_img_tech    Author: david-berthelot
import tensorflow as tf


def unboxn(vin, n):
    """vin = (batch, h, w, depth), returns vout = (batch, n*h, n*w, depth); each pixel is duplicated into an n x n patch (nearest-neighbor upsampling)."""
    s = tf.shape(vin)
    # Stack n*n identical copies along the batch dimension (pre-1.0
    # argument order: tf.concat(axis, values)).
    vout = tf.concat(0, [vin] * (n ** 2))  # Poor man's replacement for tf.tile (required for Adversarial Training support).
    vout = tf.reshape(vout, [s[0] * (n ** 2), s[1], s[2], s[3]])
    # Fold the n*n copies back into the spatial dims; since the copies are
    # identical, every pixel is duplicated n times along each axis.
    vout = tf.batch_to_space(vout, [[0, 0], [0, 0]], n)
    return vout
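In current TensorFlow the same nearest-neighbor upsampling is usually written with tf.tile directly; a minimal equivalent sketch (TF >= 1.0 argument order), assuming the adversarial-training constraint mentioned above does not apply:

import tensorflow as tf

def unboxn_tile(vin, n):
    s = tf.shape(vin)
    # Insert singleton axes, duplicate each pixel n times per spatial dim,
    # then merge: (b, h, w, d) -> (b, n*h, n*w, d).
    vout = tf.reshape(vin, [s[0], s[1], 1, s[2], 1, s[3]])
    vout = tf.tile(vout, [1, 1, n, 1, n, 1])
    return tf.reshape(vout, [s[0], s[1] * n, s[2] * n, s[3]])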
Project: tf_img_tech    Author: david-berthelot
def __call__(self, vin):
        # TODO: replace with atrous_2d
        # Dilated convolution via the space_to_batch trick: rearranging the
        # input lets a stride-1 convolution act as a convolution with holes.
        vout = tf.space_to_batch(vin, [[0, 0], [0, 0]], self.dilation)
        vout = LayerConv.__call__(self, vout)
        vout = tf.batch_to_space(vout, [[0, 0], [0, 0]], self.dilation)
        return vout
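This space_to_batch / batch_to_space sandwich is the same identity tf.nn.atrous_conv2d is built on; a minimal sketch of the direct call (the shapes and filter are illustrative assumptions):

import tensorflow as tf

x = tf.zeros([1, 64, 64, 16])
w = tf.zeros([3, 3, 16, 32])  # illustrative 3x3 filter
# SAME-padded convolution with holes, dilation rate 2.
y = tf.nn.atrous_conv2d(x, w, rate=2, padding="SAME")  # (1, 64, 64, 32)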
Project: BinaryNet.tf    Author: itayhubara
import math

import tensorflow as tf


def group_batch_images(x):
    # Arrange a square number of images into one image for visualization.
    sz = x.get_shape().as_list()
    num_cols = int(math.sqrt(sz[0]))
    img = tf.slice(x, [0, 0, 0, 0], [num_cols ** 2, -1, -1, -1])
    # Note: without the transpose step used in imageRearrange above, the
    # images end up pixel-interleaved rather than tiled side by side.
    img = tf.batch_to_space(img, [[0, 0], [0, 0]], num_cols)

    return img
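A hedged usage sketch (the batch of sixteen 28x28 grayscale images is an illustrative assumption):

import tensorflow as tf

batch = tf.zeros([16, 28, 28, 1])  # batch size must be a perfect square
grid = group_batch_images(batch)   # num_cols = 4, grid shape (1, 112, 112, 1)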
Project: inverse-compositional-STN    Author: ericlin79119
import tensorflow as tf


def imageSummary(opt, image, tag, H, W):
    blockSize = opt.visBlockSize
    # Tile the first blockSize**2 images into a blockSize x blockSize grid.
    imageOne = tf.batch_to_space(image[:blockSize**2], crops=[[0, 0], [0, 0]], block_size=blockSize)
    # batch_to_space interleaves the images; the reshape/transpose below
    # untangles them into contiguous tiles.
    imagePermute = tf.reshape(imageOne, [H, blockSize, W, blockSize, -1])
    imageTransp = tf.transpose(imagePermute, [1, 0, 3, 2, 4])
    imageBlocks = tf.reshape(imageTransp, [1, H*blockSize, W*blockSize, -1])
    # Assumes pixel values in [0, 1].
    imageBlocks = tf.cast(imageBlocks*255, tf.uint8)
    summary = tf.summary.image(tag, imageBlocks)
    return summary
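A hedged usage sketch (the options object, block size, and 28x28 shape are illustrative assumptions, not values from the project):

import tensorflow as tf
from collections import namedtuple

Opt = namedtuple("Opt", ["visBlockSize"])  # stand-in for the project's opt object
opt = Opt(visBlockSize=4)
batch = tf.zeros([16, 28, 28, 1])          # needs at least visBlockSize**2 images
summary = imageSummary(opt, batch, "samples", H=28, W=28)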

Project: pgnet    Author: galeone
import tensorflow as tf


def atrous_conv2d(value, filters, rate, name):
    """Returns the result of a convolution with holes from value and filters.

    We do not use the TensorFlow implementation because of issues with the
    shape definition of the result; the semantics are otherwise the same.
    Only "VALID" padding is used.

    Warning: this implementation is PGNet-specific. It is used only to define
    the last convolutional layer and therefore depends on pgnet constants
    (e.g. LAST_CONV_OUTPUT_STRIDE).
    """

    pad_top = 0
    pad_bottom = 0
    pad_left = 0
    pad_right = 0

    in_height = value.get_shape()[1].value + pad_top + pad_bottom
    in_width = value.get_shape()[2].value + pad_left + pad_right

    # More padding so that rate divides the height and width of the input.
    pad_bottom_extra = (rate - in_height % rate) % rate
    pad_right_extra = (rate - in_width % rate) % rate

    # The paddings argument to space_to_batch includes both padding components.
    space_to_batch_pad = ((pad_top, pad_bottom + pad_bottom_extra),
                          (pad_left, pad_right + pad_right_extra))

    value = tf.space_to_batch(
        input=value, paddings=space_to_batch_pad, block_size=rate)

    value = tf.nn.conv2d(
        input=value,
        filter=filters,
        strides=(1, LAST_CONV_OUTPUT_STRIDE, LAST_CONV_OUTPUT_STRIDE, 1),
        padding="VALID",
        name=name)

    # The crops argument to batch_to_space is just the extra padding component.
    batch_to_space_crop = ((0, pad_bottom_extra), (0, pad_right_extra))

    value = tf.batch_to_space(
        input=value, crops=batch_to_space_crop, block_size=rate)

    return value
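A hedged usage sketch (the shapes are illustrative, and the stride constant is a stand-in for the real pgnet value):

import tensorflow as tf

LAST_CONV_OUTPUT_STRIDE = 1  # illustrative stand-in for the pgnet constant

x = tf.zeros([1, 25, 25, 128])
w = tf.zeros([3, 3, 128, 20])
# With rate=2 the 25x25 input is padded to 26x26, rearranged into four
# 13x13 batch tiles, convolved with VALID padding, and stitched back
# with the extra padding cropped off.
y = atrous_conv2d(x, w, rate=2, name="last_conv")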