Python tensorflow module: depth_to_space() example source code

The following 21 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.depth_to_space().
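Before the project examples, here is a minimal, self-contained sketch of what the op does, assuming TensorFlow 1.x (in 2.x the same op is exposed as tf.nn.depth_to_space): it rearranges each group of block_size**2 input channels into a block_size x block_size spatial patch, trading depth for resolution.

import numpy as np
import tensorflow as tf

block = 2
x = np.arange(1 * 2 * 2 * 8, dtype=np.float32).reshape(1, 2, 2, 8)  # NHWC, C = block**2 * 2

with tf.Graph().as_default(), tf.Session() as sess:
    y = sess.run(tf.depth_to_space(tf.constant(x), block))

# NumPy reference: every group of block**2 channels becomes a block x block
# spatial patch, so (1, 2, 2, 8) -> (1, 4, 4, 2).
n, h, w, c = x.shape
ref = (x.reshape(n, h, w, block, block, c // block**2)
        .transpose(0, 1, 3, 2, 4, 5)
        .reshape(n, h * block, w * block, c // block**2))

print(y.shape)              # (1, 4, 4, 2)
print(np.allclose(y, ref))  # True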

Project: SRGAN-tensorflow    Author: zoharli
def __init__(self,T,train_mode=1,name='srResNet'):
        with tf.variable_scope(name):
            self.train_mode=train_mode
            conv1=conv_layer(T,[5,5,3,64],1)
            relu1=leaky_relu(conv1)
            block=[]
            for i in xrange(16):
                block.append(self.residual_block(block[-1] if i else relu1))
            conv2=conv_layer(block[-1],[3,3,64,64],1)
            bn1=batch_norm(conv2) if self.train_mode else conv2
            sum1=tf.add(bn1,relu1)
            conv3=conv_layer(sum1,[3,3,64,256],1)
            ps1=tf.depth_to_space(conv3,2) #pixel-shuffle
            relu2=leaky_relu(ps1)
            conv4=conv_layer(relu2,[3,3,64,256],1)
            ps2=tf.depth_to_space(conv4,2)
            relu3=leaky_relu(ps2)
            self.conv5=conv_layer(relu3,[3,3,64,3],1)
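
The two conv + depth_to_space pairs above (conv3/ps1 and conv4/ps2) implement pixel-shuffle upscaling: a convolution expands the channel count by a factor of block_size**2, then depth_to_space trades those channels for spatial resolution. A minimal sketch of one such x2 step, assuming TensorFlow 1.x and substituting tf.layers.conv2d for the repo's conv_layer helper:

import tensorflow as tf

def pixel_shuffle_x2(x, filters=64):
    # Conv to 4*filters channels, then depth_to_space(2) doubles H and W.
    x = tf.layers.conv2d(x, filters * 4, 3, padding='same')
    return tf.nn.leaky_relu(tf.depth_to_space(x, 2))

inputs = tf.placeholder(tf.float32, [None, 24, 24, 64])
up2 = pixel_shuffle_x2(inputs)   # (None, 48, 48, 64)
up4 = pixel_shuffle_x2(up2)      # (None, 96, 96, 64) -- two steps give the 4x factor used in srResNet
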
Project: dataset    Author: analysiscenter
def depth_to_space(inputs, block_size, name='d2s', data_format='channels_last'):
    """ 1d, 2d and 3d depth_to_space transformation.

    Parameters
    ----------
    inputs : tf.Tensor
        a tensor to resize
    block_size : int
        An int that is >= 2. The size of the spatial block
    name : str
        scope name
    data_format : {'channels_last', 'channels_first'}
        position of the channels dimension

    Returns
    -------
    tf.Tensor

    See also
    --------
    `tf.depth_to_space <https://www.tensorflow.org/api_docs/python/tf/depth_to_space>`_
    """
    dim = inputs.shape.ndims - 2
    if dim == 2:
        dafo = 'NHWC' if data_format == 'channels_last' else 'NCHW'
        return tf.depth_to_space(inputs, block_size, name, data_format=dafo)
    else:
        if data_format == 'channels_first':
            inputs = tf.transpose(inputs, [0] + list(range(2, dim+2)) + [1])
        x = _depth_to_space(inputs, block_size, name)
        if data_format == 'channels_first':
            x = tf.transpose(x, [0, dim+1] + list(range(1, dim+1)))
    return x
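
A small usage sketch for the wrapper above, covering only the 2d channels_last case (which reduces to a plain tf.depth_to_space call); the 1d/3d paths additionally rely on the module's _depth_to_space helper, which is not shown here:

import tensorflow as tf

# assumes the depth_to_space wrapper defined above is in scope
images = tf.placeholder(tf.float32, [None, 16, 16, 12])   # 12 = 2*2*3 channels
resized = depth_to_space(images, block_size=2)            # -> (None, 32, 32, 3)
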
Project: comprehend    Author: Fenugreek
def _pool_overlay(self, pool, overlay):

        pool = tf.where(overlay, pool, self.zeros)
        pool_shape = self.shapes[2]        
        return tf.depth_to_space(tf.reshape(pool, pool_shape[:3] + \
                                    [pool_shape[3] * pool_shape[4]]),
                                 self.pool_side)
Project: GANGogh    Author: rkjones4
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
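
The transposes above exist because tf.depth_to_space defaults to the NHWC layout, while this network keeps its tensors in NCHW. A minimal sketch of that round trip, assuming TensorFlow 1.x:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 256, 8, 8])   # NCHW, 256 = 4 * 64
y = tf.transpose(x, [0, 2, 3, 1])                   # -> NHWC
y = tf.depth_to_space(y, 2)                         # -> (None, 16, 16, 64)
y = tf.transpose(y, [0, 3, 1, 2])                   # -> NCHW, (None, 64, 16, 16)
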
Project: keras-contrib    Author: farizrahman4u
def depth_to_space(input, scale, data_format=None):
    ''' Uses phase shift algorithm to convert channels/depth for spatial resolution '''
    if data_format is None:
        data_format = image_data_format()
    data_format = data_format.lower()
    input = _preprocess_conv2d_input(input, data_format)
    out = tf.depth_to_space(input, scale)
    out = _postprocess_conv2d_output(out, data_format)
    return out
Project: dataset    Author: analysiscenter
def subpixel_conv(inputs, factor=2, name='subpixel', data_format='channels_last', **kwargs):
    """ Resize input tensor with subpixel convolution (depth to space operation)

    Parameters
    ----------
    inputs : tf.Tensor
        a tensor to resize
    factor : int
        upsampling factor
    name : str
        scope name
    data_format : {'channels_last', 'channels_first'}
        position of the channels dimension

    Returns
    -------
    tf.Tensor
    """
    dim = inputs.shape.ndims - 2

    _, channels = _calc_size(inputs, factor, data_format)
    layout = kwargs.get('layout', 'cna')
    kwargs['filters'] = channels*factor**dim

    with tf.variable_scope(name):
        x = conv_block(inputs, layout, kernel_size=1, name='conv', data_format=data_format, **kwargs)
        x = depth_to_space(x, block_size=factor, name='d2s', data_format=data_format)
    return x
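
A usage sketch, assuming the conv_block and _calc_size helpers from the same dataset package are importable; a 1x1 convolution expands the channels by factor**dim, then depth_to_space rearranges them into a spatially larger map:

import tensorflow as tf

# assumes subpixel_conv as defined above, plus its package helpers
x = tf.placeholder(tf.float32, [None, 32, 32, 16])
y = subpixel_conv(x, factor=2)   # expected shape roughly (None, 64, 64, 16)
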
Project: coremltools    Author: apple
def get_tf_predictions_reorganize(X, params):
    Hin = params["H"]
    Win = params["W"]
    Cin = params["C"]
    with tf.Graph().as_default(), tf.Session() as sess:
        x = tf.placeholder(tf.float32, shape=(1,Hin,Win,Cin))
        if params["mode"] == 'SPACE_TO_DEPTH': 
            y = tf.space_to_depth(x, params["block_size"])
        else:
            y = tf.depth_to_space(x, params["block_size"])

        return sess.run(y, feed_dict={x: X})
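
Since this test drives space_to_depth and depth_to_space through the same code path, note that the two ops are exact inverses for a matching block_size. A quick check, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

X = np.random.rand(1, 4, 4, 3).astype(np.float32)
with tf.Graph().as_default(), tf.Session() as sess:
    x = tf.placeholder(tf.float32, shape=X.shape)
    roundtrip = tf.depth_to_space(tf.space_to_depth(x, 2), 2)
    print(np.allclose(sess.run(roundtrip, feed_dict={x: X}), X))  # True
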
Project: improved_wgan_training    Author: YuguangTong
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
Project: improved_wgan_training    Author: YuguangTong
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
Project: improved_wgan_training    Author: YuguangTong
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
Project: improved_wgan_training    Author: igul222
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
Project: improved_wgan_training    Author: igul222
def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    output = inputs
    output = tf.concat([output, output, output, output], axis=1)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
    return output
Project: improved_wgan_training    Author: igul222
def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    output = inputs
    output = tf.concat([output, output, output, output], axis=1)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
    return output
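
Concatenating the input four times along the channel axis and then applying depth_to_space with block size 2, as UpsampleConv does, is equivalent to nearest-neighbour 2x upsampling, after which an ordinary convolution is applied. A quick check of that equivalence, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

X = np.random.rand(1, 3, 5, 5).astype(np.float32)        # NCHW
with tf.Graph().as_default(), tf.Session() as sess:
    x = tf.constant(X)
    up = tf.concat([x, x, x, x], axis=1)                  # 4x channels
    up = tf.transpose(up, [0, 2, 3, 1])                   # NCHW -> NHWC
    up = tf.depth_to_space(up, 2)
    up = tf.transpose(up, [0, 3, 1, 2])                   # back to NCHW
    out = sess.run(up)

nearest = X.repeat(2, axis=2).repeat(2, axis=3)           # NumPy nearest-neighbour upsample
print(np.allclose(out, nearest))                          # True
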
Project: DenseNet    Author: titu1994
def depth_to_space(input, scale, data_format=None):
    ''' Uses phase shift algorithm to convert channels/depth for spatial resolution '''
    if data_format is None:
        data_format = image_data_format()

    if data_format == 'channels_first':
        data_format = 'NCHW'
    else:
        data_format = 'NHWC'

    # tf.depth_to_space expects the uppercase 'NHWC'/'NCHW' strings set above,
    # so the data_format must not be lower-cased here.
    out = tf.depth_to_space(input, scale, data_format=data_format)
    return out
Project: PixelVAE    Author: igul222
def SubpixelConv2D(*args, **kwargs):
    kwargs['output_dim'] = 4*kwargs['output_dim']
    output = lib.ops.conv2d.Conv2D(*args, **kwargs)
    output = tf.transpose(output, [0,2,3,1])
    output = tf.depth_to_space(output, 2)
    output = tf.transpose(output, [0,3,1,2])
    return output
Project: GAN-general    Author: weilinie
def subpixelConv2D(*args, **kwargs):
    kwargs['num_outputs'] = 4*kwargs['num_outputs']
    output = tcl.conv2d(*args, **kwargs)
    output = tf.depth_to_space(output, 2)
    return output
Project: Fully-Connected-DenseNets-Semantic-Segmentation    Author: titu1994
def depth_to_space_tf(input, scale, data_format=None):
    ''' Uses phase shift algorithm to convert channels/depth for spatial resolution '''
    import tensorflow as tf
    if data_format is None:
        data_format = K.image_dim_ordering()
    data_format = data_format.lower()
    input = K._preprocess_conv2d_input(input, data_format)
    out = tf.depth_to_space(input, scale)
    out = K._postprocess_conv2d_output(out, data_format)
    return out
Project: tensor2tensor    Author: tensorflow
def decompress_step(source, hparams, first_relu, is_2d, name):
  """Decompression function."""
  with tf.variable_scope(name):
    shape = common_layers.shape_list(source)
    multiplier = 4 if is_2d else 2
    kernel = (1, 1) if is_2d else (1, 1)
    thicker = common_layers.conv_block(
        source, hparams.hidden_size * multiplier, [((1, 1), kernel)],
        first_relu=first_relu, name="decompress_conv")
    if is_2d:
      return tf.depth_to_space(thicker, 2)
    return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size])
Project: EDSR    Author: iwtw
def upsample(inputs, scale, dim, upsample_method="subpixel", activation_fn=None, regularization_scale=0.0):
    "upsample layer"
    act = activation_fn
    if act is None:
        act = tf.identity
    #with tf.variable_scope(scope) as scope :
    if upsample_method == "subpixel":
        if scale == 2:
            outputs = conv2d(inputs, dim * 2**2, 3, 1, he_init=(activation_fn == tf.nn.relu),
                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 2)
            outputs = act(outputs)
        elif scale == 3:
            outputs = conv2d(inputs, dim * 3**2, 3, 1, he_init=(activation_fn == tf.nn.relu),
                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 3)
            outputs = act(outputs)
        elif scale == 4:
            outputs = conv2d(inputs, dim * 2**2, 3, 1, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 2)
            outputs = conv2d(outputs, dim * 2**2, 3, 1, he_init=(activation_fn == tf.nn.relu),
                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = tf.depth_to_space(outputs, 2)
            outputs = act(outputs)
    elif upsample_method == "conv_transpose":
        if scale == 2:
            outputs = utils.conv2d_transpose(inputs, dim, 3, 2, he_init=(activation_fn == tf.nn.relu),
                                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = act(outputs)
        elif scale == 3:
            outputs = utils.conv2d_transpose(inputs, dim, 3, 3, he_init=(activation_fn == tf.nn.relu),
                                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = act(outputs)
        elif scale == 4:
            outputs = utils.conv2d_transpose(inputs, dim, 3, 2, regularization_scale=regularization_scale)
            outputs = utils.conv2d_transpose(outputs, dim, 3, 2, he_init=(activation_fn == tf.nn.relu),
                                             activation_fn=activation_fn, regularization_scale=regularization_scale)
            outputs = act(outputs)

    return outputs
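
A small usage sketch, assuming the conv2d and utils helpers from the same EDSR repository are importable; scale 4 is realised as two conv + depth_to_space(2) stages:

import tensorflow as tf

# assumes upsample as defined above, plus the repo's conv2d/utils helpers
lr_features = tf.placeholder(tf.float32, [None, 48, 48, 64])
sr_features = upsample(lr_features, scale=4, dim=64, activation_fn=tf.nn.relu)
# expected shape: (None, 192, 192, 64)
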
Project: real-nvp    Author: taesung89
def backward(self, y, z):
    ys = int_shape(y)
    assert ys[3] % 4 == 0
    x = tf.depth_to_space(y,2)

    if z is not None:
      z = tf.depth_to_space(z,2)

    return x, z

# The layer that factors out half of the variables
# directly to the latent space.
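
In this Real NVP excerpt, depth_to_space(., 2) inverts the space_to_depth "squeeze" applied in the forward pass. A quick check of that inversion, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

X = np.random.rand(1, 8, 8, 3).astype(np.float32)
with tf.Graph().as_default(), tf.Session() as sess:
    squeezed = tf.space_to_depth(tf.constant(X), 2)   # (1, 4, 4, 12), the forward "squeeze"
    restored = tf.depth_to_space(squeezed, 2)         # (1, 8, 8, 3)
    print(np.allclose(sess.run(restored), X))         # True
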
Project: tensor2tensor    Author: tensorflow
def deconv_stride2_multistep(x,
                             nbr_steps,
                             output_filters,
                             name=None,
                             reuse=None):
  """Use a deconvolution to upsample x by 2**`nbr_steps`.

  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
     `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: an int specifying the number of doubling upsample rounds to
     apply.
    output_filters: an int specifying the filter count for the deconvolutions
    name: a string
    reuse: a boolean

  Returns:
    a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
     `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
       output_filters]`
  """
  with tf.variable_scope(
      name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):

    def deconv1d(cur, i):
      cur_shape = shape_list(cur)
      thicker = conv(
          cur,
          output_filters * 2, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv1d" + str(i))
      return tf.reshape(thicker,
                        [cur_shape[0], cur_shape[1] * 2, 1, output_filters])

    def deconv2d(cur, i):
      thicker = conv(
          cur,
          output_filters * 4, (1, 1),
          padding="SAME",
          activation=tf.nn.relu,
          name="deconv2d" + str(i))
      return tf.depth_to_space(thicker, 2)

    cur = x
    for i in xrange(nbr_steps):
      if cur.get_shape()[2] == 1:
        cur = deconv1d(cur, i)
      else:
        cur_dim = shape_list(cur)[2]
        if isinstance(cur_dim, int):
          if cur_dim == 1:
            cur = deconv1d(cur, i)
          else:
            cur = deconv2d(cur, i)
        else:
          cur = tf.cond(
              tf.equal(cur_dim, 1),
              lambda idx=i: deconv1d(cur, idx),
              lambda idx=i: deconv2d(cur, idx))
    return cur
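
A usage sketch, assuming TensorFlow 1.x and that the conv and shape_list helpers used above are in scope; each step doubles the spatial dimensions (via depth_to_space in the 2d branch):

import tensorflow as tf

# assumes deconv_stride2_multistep as defined above
x = tf.placeholder(tf.float32, [None, 8, 8, 32])
y = deconv_stride2_multistep(x, nbr_steps=3, output_filters=16, name="upsample")
# expected shape: (None, 64, 64, 16)
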