Python tensorflow module: concat_v2() code examples

We have extracted the following 25 code examples from open-source Python projects to illustrate how to use tensorflow.concat_v2(). tf.concat_v2 was a transitional API introduced in TensorFlow 0.12: it takes (values, axis), the argument order that tf.concat itself adopted in 1.0, after which concat_v2 was removed.
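A minimal sketch of the argument-order difference, assuming a TensorFlow 0.12.x install where both APIs coexist (tensor names are illustrative):

import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8]])

c_old = tf.concat(0, [a, b])     # pre-1.0 tf.concat: axis comes first
c_v2 = tf.concat_v2([a, b], 0)   # 0.12.x tf.concat_v2: values come first
# In 1.0+, tf.concat([a, b], 0) takes the concat_v2 argument order.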

Project: How-to-Learn-from-Little-Data    Author: llSourcell    | project source | file source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Set V[i, dim2[i]] = val[i] for each row i
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            v, d2, chg = elems  # tuple-parameter unpacking in the signature is Python 2 only
            d2_int = tf.cast(d2, tf.int32)
            # Rebuild the row with position d2 replaced by chg, sliced back to its original length.
            return tf.slice(tf.concat_v2([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
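A minimal usage sketch for update_tensor above, assuming a TF 0.12.x graph (all tensor values are illustrative):

V = tf.ones([4, 5])                    # four rows of five elements
dim2 = tf.constant([0., 1., 2., 3.])   # column to overwrite in each row
val = tf.constant([9., 9., 9., 9.])    # replacement value for each row
updated = update_tensor(V, dim2, val)  # row i now has 9. at column dim2[i]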
Project: NTM-One-Shot-TF    Author: hmishra2250    | project source | file source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Set V[i, dim2[i]] = val[i] for each row i
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            v, d2, chg = elems  # tuple-parameter unpacking in the signature is Python 2 only
            d2_int = tf.cast(d2, tf.int32)
            # Rebuild the row with position d2 replaced by chg, sliced back to its original length.
            return tf.slice(tf.concat_v2([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def concatenate(tensors, axis=-1):
    """Concatenates a list of tensors alongside the specified axis.

    # Returns
        A tensor.
    """
    if axis < 0:
        dims = ndim(tensors[0])
        if dims:
            axis = axis % dims
        else:
            axis = 0

    if py_all([is_sparse(x) for x in tensors]):
        return tf.sparse_concat(axis, tensors)
    else:
        if tf_major_version >= 1:
            return tf.concat([to_dense(x) for x in tensors], axis)
        else:
            try:
                return tf.concat_v2([to_dense(x) for x in tensors], axis)
            except AttributeError:
                return tf.concat(axis, [to_dense(x) for x in tensors])
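An illustrative call against this backend function (the placeholder shapes are assumptions; ndim, py_all, is_sparse, and to_dense are the backend's own helpers):

a = tf.placeholder(tf.float32, [None, 4])
b = tf.placeholder(tf.float32, [None, 6])
c = concatenate([a, b])  # axis=-1 resolves to 1, giving shape [None, 10]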
Project: shuttleNet    Author: shiyemin    | project source | file source
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block."""
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    mixed = tf.concat_v2([tower_conv, tower_conv1_1, tower_conv2_2], 3)
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
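In this and the following shuttleNet blocks, the trailing 3 passed to tf.concat_v2 is the channel axis of NHWC feature maps, so the branch outputs are stacked along channels. A minimal sketch of the shape arithmetic (shapes are illustrative):

x = tf.zeros([8, 35, 35, 32])   # NHWC: batch, height, width, channels
y = tf.zeros([8, 35, 35, 48])
z = tf.zeros([8, 35, 35, 64])
mixed = tf.concat_v2([x, y, z], 3)   # -> [8, 35, 35, 144]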
Project: shuttleNet    Author: shiyemin    | project source | file source
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block."""
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat_v2([tower_conv, tower_conv1_2], 3)
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
Project: shuttleNet    Author: shiyemin    | project source | file source
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block."""
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat_v2([tower_conv, tower_conv1_2], 3)
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
Project: shuttleNet    Author: shiyemin    | project source | file source
def block_inception_a(inputs, scope=None, reuse=None):
  """Builds Inception-A block for Inception v4 network."""
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
        branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
      with tf.variable_scope('Branch_3'):
        branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
Project: shuttleNet    Author: shiyemin    | project source | file source
def block_reduction_a(inputs, scope=None, reuse=None):
  """Builds Reduction-A block for Inception v4 network."""
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                               scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
        branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
                               padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                   scope='MaxPool_1a_3x3')
      return tf.concat_v2([branch_0, branch_1, branch_2], 3)
Project: shuttleNet    Author: shiyemin    | project source | file source
def block_reduction_b(inputs, scope=None, reuse=None):
  """Builds Reduction-B block for Inception v4 network."""
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
        branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
                               padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
        branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
        branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
                               padding='VALID', scope='Conv2d_1a_3x3')
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                   scope='MaxPool_1a_3x3')
      return tf.concat_v2([branch_0, branch_1, branch_2], 3)
Project: shuttleNet    Author: shiyemin    | project source | file source
def block_inception_c(inputs, scope=None, reuse=None):
  """Builds Inception-C block for Inception v4 network."""
  # By default use stride=1 and SAME padding
  with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                      stride=1, padding='SAME'):
    with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
      with tf.variable_scope('Branch_0'):
        branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
      with tf.variable_scope('Branch_1'):
        branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = tf.concat_v2([
            slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
            slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')], 3)
      with tf.variable_scope('Branch_2'):
        branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
        branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
        branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
        branch_2 = tf.concat_v2([
            slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
            slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')], 3)
      with tf.variable_scope('Branch_3'):
        branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
        branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
      return tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
Project: keras-customized    Author: ambrite    | project source | file source
def concatenate(tensors, axis=-1):
    '''Concatenates a list of tensors along the specified axis.
    '''
    if axis < 0:
        dims = ndim(tensors[0])
        if dims:
            axis = axis % dims
        else:
            axis = 0

    if py_all([is_sparse(x) for x in tensors]):
        return tf.sparse_concat(axis, tensors)
    else:
        try:
            return tf.concat_v2([to_dense(x) for x in tensors], axis)
        except AttributeError:
            return tf.concat(axis, [to_dense(x) for x in tensors])
Project: keras    Author: NVIDIA    | project source | file source
def concatenate(tensors, axis=-1):
    """Concatenates a list of tensors alongside the specified axis.

    # Returns
        A tensor.
    """
    if axis < 0:
        dims = ndim(tensors[0])
        if dims:
            axis = axis % dims
        else:
            axis = 0

    if py_all([is_sparse(x) for x in tensors]):
        return tf.sparse_concat(axis, tensors)
    else:
        if tf_major_version >= 1:
            return tf.concat([to_dense(x) for x in tensors], axis)
        else:
            try:
                return tf.concat_v2([to_dense(x) for x in tensors], axis)
            except AttributeError:
                return tf.concat(axis, [to_dense(x) for x in tensors])
Project: Unsupervised-Anomaly-Detection-with-Generative-Adversarial-Networks    Author: xtarx    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
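This one-line wrapper, which recurs in several projects below, pins DCGAN-style code to the 0.12.x argument order. A version-tolerant variant, sketched under the assumption that a missing concat_v2 means a 1.0+ TensorFlow (concat_compat is a hypothetical name):

def concat_compat(tensors, axis, *args, **kwargs):
    if hasattr(tf, 'concat_v2'):
        return tf.concat_v2(tensors, axis, *args, **kwargs)  # TF 0.12.x
    return tf.concat(tensors, axis, *args, **kwargs)         # TF 1.0+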
Project: WaterGAN    Author: kskin    | project source | file source
def conv_cond_concat(x, y):
  """Concatenate conditioning vector on feature map axis."""
  x_shapes = x.get_shape()
  y_shapes = y.get_shape()
  return tf.concat_v2([
      x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
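conv_cond_concat implements the usual conditional-GAN trick: a label tensor y of shape [batch, 1, 1, n_classes] is broadcast over the spatial dimensions and appended as extra channels. An illustrative call (shapes are assumptions):

x = tf.zeros([64, 28, 28, 1])   # feature map, e.g. MNIST-sized images
y = tf.zeros([64, 1, 1, 10])    # one-hot labels reshaped to 4-D
h = conv_cond_concat(x, y)      # -> [64, 28, 28, 11]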
Project: chi    Author: rmst    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: tensorflow-generative-model-collections    Author: hwalsuklee    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: 6853-Project    Author: kennygea    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: predictron    Author: zhongwen    | project source | file source
def average_gradients(tower_grads):
  """Calculate the average gradient for each shared variable across all towers.
  Note that this function provides a synchronization point across all towers.
  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.
  Returns:
     List of pairs of (gradient, variable) where the gradient has been averaged
     across all towers.
  """
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    for g, _ in grad_and_vars:
      # Add 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)

      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)

    # Average over the 'tower' dimension.
    grad = tf.concat_v2(grads, 0)
    grad = tf.reduce_mean(grad, 0)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads.append(grad_and_var)
  return average_grads
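A minimal sketch of how tower_grads is typically assembled before the call (build_tower_loss and num_gpus are hypothetical):

opt = tf.train.GradientDescentOptimizer(0.1)
tower_grads = []
for i in range(num_gpus):
    with tf.device('/gpu:%d' % i):
        loss = build_tower_loss(i)  # hypothetical per-tower loss builder
        tower_grads.append(opt.compute_gradients(loss))
train_op = opt.apply_gradients(average_gradients(tower_grads))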
Project: easygen    Author: markriedl    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: GPflow    Author: GPflow    | project source | file source
def forward_tensor(self, x):
        N, D = tf.shape(x)[0], tf.shape(x)[2]
        xm = tf.slice(x, [0, 0, 0], tf.stack([N - 1, -1, -1]))
        xp = x[1:, :, :]
        diagblocks = tf.matmul(x, x, transpose_a=True)
        # Pad the off-diagonal blocks with one zero block so both stacks have N entries.
        offblocks = tf.concat_v2([tf.matmul(xm, xp, transpose_a=True),
                                  tf.zeros((1, D, D), dtype=tf.float64)], axis=0)
        return tf.stack([diagblocks, offblocks])
Project: ABC-GAN    Author: IgorSusmelj    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: img2img    Author: lsqpku    | project source | file source
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
Project: DeepModel    Author: KarolAntczak    | project source | file source
def l2_norm(weights):
    # Chain every weight tensor into one flat vector; the initial [] converts
    # to an empty float32 tensor, so the first concat_v2 call still works.
    weights_flat = []
    for weight in weights:
        weights_flat = tf.concat_v2([weights_flat, tf.reshape(weight, [-1])], axis=0)
    # Note: this returns the mean of the squared weights, not a true L2 norm.
    return tf.reduce_mean(tf.square(weights_flat))
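An illustrative call, e.g. as a weight-decay style penalty (the variable collection used is an assumption):

reg = l2_norm(tf.trainable_variables())  # scalar mean of all squared weights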
Project: predictron    Author: zhongwen    | project source | file source
def build_model(self):
    sc = predictron_arg_scope()
    with tf.variable_scope('state'):
      with slim.arg_scope(sc):
        state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
        state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv1/preact')
        state = slim.conv2d(state, 32, [3, 3], scope='conv2')
        state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv2/preact')

    iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')

    rewards_arr = []
    gammas_arr = []
    lambdas_arr = []
    values_arr = []

    for k in range(self.max_depth):
      state, reward, gamma, lambda_, value = iter_template(state)
      rewards_arr.append(reward)
      gammas_arr.append(gamma)
      lambdas_arr.append(lambda_)
      values_arr.append(value)

    _, _, _, _, value = iter_template(state)
    # K + 1 elements
    values_arr.append(value)

    bs = tf.shape(self.inputs)[0]
    # [batch_size, K * maze_size]
    self.rewards = tf.pack(rewards_arr, axis=1)
    # [batch_size, K, maze_size]
    self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.rewards = tf.concat_v2(values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.rewards],
                                axis=1, name='rewards')

    # [batch_size, K * maze_size]
    self.gammas = tf.pack(gammas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.gammas = tf.concat_v2(values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.gammas],
                               axis=1, name='gammas')

    # [batch_size, K * maze_size]
    self.lambdas = tf.pack(lambdas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])

    # [batch_size, (K + 1) * maze_size]
    self.values = tf.pack(values_arr, axis=1)
    # [batch_size, K + 1, maze_size]
    self.values = tf.reshape(self.values, [-1, (self.max_depth + 1), self.maze_size])

    self.build_preturns()
    self.build_lambda_preturns()
Project: alternating-reader-tf    Author: nschuc    | project source | file source
def _inference(self, docs, queries):
        """
        Computes document attentions given a document batch and query batch.
        """
        with tf.name_scope("inference"):
            # Compute document lengths / query lengths for batch
            doc_lens = length(docs)
            query_lens = length(queries)
            batch_size = tf.shape(docs)[0]

            with tf.variable_scope('encode'):
                # Encode Document / Query
                with tf.variable_scope('docs'), tf.device('/gpu:0'):
                    encoded_docs = tf.nn.dropout(self._embed(docs), self._keep_prob)
                    encoded_docs = self._bidirectional_encode(encoded_docs, doc_lens, self._encode_size)
                with tf.variable_scope('queries'), tf.device('/gpu:1'):
                    encoded_queries = tf.nn.dropout(self._embed(queries), self._keep_prob)
                    encoded_queries = self._bidirectional_encode(encoded_queries, query_lens, self._encode_size)

            with tf.variable_scope('attend') as scope:
                infer_gru = tf.nn.rnn_cell.GRUCell(self._infer_size)
                infer_state = infer_gru.zero_state(batch_size, tf.float32)
                for iter_step in range(self._num_glimpses):
                    if iter_step > 0:
                        scope.reuse_variables()

                    # Glimpse query and document
                    with tf.device('/gpu:0'):
                        q_attention, q_glimpse = self._glimpse(self._A_q, self._a_q, encoded_queries, infer_state)
                        tf.add_to_collection('query_attentions', q_attention)
                    with tf.device('/gpu:1'):
                        d_attention, d_glimpse = self._glimpse(self._A_d, self._a_d, encoded_docs, tf.concat_v2([infer_state, q_glimpse], 1))
                        tf.add_to_collection('doc_attentions', d_attention)
                    # Search Gates

                    gate_concat = tf.concat_v2([infer_state, q_glimpse, d_glimpse, q_glimpse * d_glimpse], 1)

                    r_d = tf.sigmoid(tf.matmul(gate_concat, self._g_d))
                    r_d = tf.nn.dropout(r_d, self._keep_prob)
                    r_q = tf.sigmoid(tf.matmul(gate_concat, self._g_q))
                    r_q = tf.nn.dropout(r_q, self._keep_prob)

                    combined_gated_glimpse = tf.concat_v2([r_q * q_glimpse, r_d * d_glimpse], 1)
                    _, infer_state = infer_gru(combined_gated_glimpse, infer_state)

            return tf.to_float(tf.sign(tf.abs(docs))) * d_attention