Python tensorflow module, concat() usage examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.concat().
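
Before the project examples, here is a minimal sketch of the call itself, written against the TensorFlow 1.x API. Note that TensorFlow 1.0 reversed the argument order: tf.concat(values, axis) replaced the pre-1.0 tf.concat(axis, values), and many of the snippets below still use the older order.

import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8]])

rows = tf.concat([a, b], axis=0)  # shape [4, 2]: stacked along axis 0
cols = tf.concat([a, b], axis=1)  # shape [2, 4]: joined along axis 1

with tf.Session() as sess:
    print(sess.run(rows))  # [[1 2] [3 4] [5 6] [7 8]]
    print(sess.run(cols))  # [[1 2 5 6] [3 4 7 8]]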

Project: Tensormodels    Author: asheshjain399    | Project source | File source
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels
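
This snippet targets the pre-1.0 API (tf.op_scope, tf.pack, and tf.concat with the axis argument first). On TensorFlow 1.x the same result is available directly from tf.one_hot; a minimal sketch, assuming integer labels:

labels = tf.constant([0, 2, 1])
onehot_labels = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0)  # shape [3, 3]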
Project: visual-search    Author: GYXie    | Project source | File source
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''From https://github.com/ethereon/caffe-tensorflow
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    if group == 1:
        conv = convolve(input, kernel)
    else:
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])
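
The split/concat pair implements grouped convolution in the AlexNet style: the input and the kernel are split along the channel axis, each group is convolved independently, and the group outputs are concatenated back together. A hypothetical call, assuming an AlexNet-like layer (96 input channels, 256 output channels, 2 groups) and the pre-1.0 tf.split/tf.concat argument order used above:

x = tf.placeholder(tf.float32, [None, 27, 27, 96])
kernel = tf.Variable(tf.truncated_normal([5, 5, 48, 256], stddev=0.01))  # 96 / 2 = 48 input channels per group
biases = tf.Variable(tf.zeros([256]))
out = conv(x, kernel, biases, k_h=5, k_w=5, c_o=256, s_h=1, s_w=1, padding="SAME", group=2)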
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, freeze=False):
    """Fire layer constructor.

    Args:
      layer_name: layer name
      inputs: input tensor
      s1x1: number of 1x1 filters in squeeze layer.
      e1x1: number of 1x1 filters in expand layer.
      e3x3: number of 3x3 filters in expand layer.
      freeze: if true, do not train parameters in this layer.
    Returns:
      fire layer operation.
    """

    sq1x1 = self._conv_layer(
        layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
        padding='SAME', freeze=freeze)
    ex1x1 = self._conv_layer(
        layer_name+'/expand1x1', sq1x1, filters=e1x1, size=1, stride=1,
        padding='SAME', freeze=freeze)
    ex3x3 = self._conv_layer(
        layer_name+'/expand3x3', sq1x1, filters=e3x3, size=3, stride=1,
        padding='SAME', freeze=freeze)

    return tf.concat(3, [ex1x1, ex3x3], name=layer_name+'/concat')
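
The fire module concatenates its 1x1 and 3x3 expand branches along the channel axis (axis 3 in NHWC), so the output carries e1x1 + e3x3 channels. The channel concat in isolation, a minimal sketch with made-up shapes and the TF 1.x keyword form:

ex1x1 = tf.zeros([8, 56, 56, 64])
ex3x3 = tf.zeros([8, 56, 56, 64])
merged = tf.concat([ex1x1, ex3x3], axis=3)  # shape [8, 56, 56, 128]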
Project: squeezeDet-hand    Author: fyhtea    | Project source | File source
def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, freeze=False):
    """Fire layer constructor.

    Args:
      layer_name: layer name
      inputs: input tensor
      s1x1: number of 1x1 filters in squeeze layer.
      e1x1: number of 1x1 filters in expand layer.
      e3x3: number of 3x3 filters in expand layer.
      freeze: if true, do not train parameters in this layer.
    Returns:
      fire layer operation.
    """

    sq1x1 = self._conv_layer(
        layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
        padding='SAME', freeze=freeze)
    ex1x1 = self._conv_layer(
        layer_name+'/expand1x1', sq1x1, filters=e1x1, size=1, stride=1,
        padding='SAME', freeze=freeze)
    ex3x3 = self._conv_layer(
        layer_name+'/expand3x3', sq1x1, filters=e3x3, size=3, stride=1,
        padding='SAME', freeze=freeze)

    return tf.concat(3, [ex1x1, ex3x3], name=layer_name+'/concat')
Project: human-rl    Author: gsastry    | Project source | File source
def model(self, features, labels):
        x = features["observation"]
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        actions = tf.one_hot(tf.reshape(features["action"],[-1]), depth=6, on_value=1.0, off_value=0.0, axis=1)
        x = tf.concat(1, [tf.contrib.layers.flatten(x),  actions])
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        logits = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
        prediction = tf.sigmoid(logits, name="prediction")
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits, tf.expand_dims(labels, axis=1)),name="loss")
        train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
          learning_rate=self.learning_rate)
        tf.add_to_collection('prediction', prediction)
        tf.add_to_collection('loss', loss)
        return prediction, loss, train_op
Project: X-ray-classification    Author: bendidi    | Project source | File source
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block."""
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
Project: X-ray-classification    Author: bendidi    | Project source | File source
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block."""
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
Project: X-ray-classification    Author: bendidi    | Project source | File source
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block."""
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
  return net
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def decode(self, cell_dec, enc_final_state, output_size, output_embed_matrix, training, grammar_helper=None):
        if self.config.use_dot_product_output:
            output_layer = DotProductLayer(output_embed_matrix)
        else:
            output_layer = tf.layers.Dense(output_size, use_bias=False)

        go_vector = tf.ones((self.batch_size,), dtype=tf.int32) * self.config.grammar.start
        if training:
            output_ids_with_go = tf.concat([tf.expand_dims(go_vector, axis=1), self.output_placeholder], axis=1)
            outputs = tf.nn.embedding_lookup([output_embed_matrix], output_ids_with_go)
            helper = TrainingHelper(outputs, self.output_length_placeholder+1)
        else:
            helper = GreedyEmbeddingHelper(output_embed_matrix, go_vector, self.config.grammar.end)

        if self.config.use_grammar_constraints:
            decoder = GrammarBasicDecoder(self.config.grammar, cell_dec, helper, enc_final_state, output_layer=output_layer, training_output = self.output_placeholder if training else None,
                                          grammar_helper=grammar_helper)
        else:
            decoder = BasicDecoder(cell_dec, helper, enc_final_state, output_layer=output_layer)

        final_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=self.max_length)

        return final_outputs
Project: visual-search    Author: GYXie    | Project source | File source
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''From https://github.com/ethereon/caffe-tensorflow
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    if group == 1:
        conv = convolve(input, kernel)
    else:
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])
Project: visual-search    Author: GYXie    | Project source | File source
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''From https://github.com/ethereon/caffe-tensorflow
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    if group == 1:
        conv = convolve(input, kernel)
    else:
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])
Project: visual-search    Author: GYXie    | Project source | File source
def conv(input, kernel, biases, k_h, k_w, c_o, s_h, s_w, padding="VALID", group=1):
    '''From https://github.com/ethereon/caffe-tensorflow
    '''
    c_i = input.get_shape()[-1]
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)

    if group == 1:
        conv = convolve(input, kernel)
    else:
        input_groups = tf.split(3, group, input)
        kernel_groups = tf.split(3, group, kernel)
        output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
        conv = tf.concat(3, output_groups)
    return tf.reshape(tf.nn.bias_add(conv, biases), [-1] + conv.get_shape().as_list()[1:])
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss(self, predictions, labels, **unused_params):
    bound = FLAGS.softmax_bound
    vocab_size_1 = bound
    with tf.name_scope("loss_softmax"):
      epsilon = 10e-8
      float_labels = tf.cast(labels, tf.float32)
      labels_1 = float_labels[:,:vocab_size_1]
      predictions_1 = predictions[:,:vocab_size_1]
      cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
      lables_2 = float_labels[:,vocab_size_1:]
      predictions_2 = predictions[:,vocab_size_1:]
      # l1 normalization (labels are no less than 0)
      label_rowsum = tf.maximum(
          tf.reduce_sum(lables_2, 1, keep_dims=True),
          epsilon)
      label_append = 1.0-tf.reduce_max(lables_2, 1, keep_dims=True)
      norm_float_labels = tf.concat((tf.div(lables_2, label_rowsum),label_append),axis=1)
      predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
      softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
      softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
          1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
      softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
    return tf.reduce_mean(softmax_loss) + cross_entropy_loss
Project: youtube-8m    Author: wangheda    | Project source | File source
def get_forward_parameters(vocab_size=4716):
    t_vars = tf.trainable_variables()
    h1_vars_weight = [var for var in t_vars if 'hidden_1' in var.name and 'weights' in var.name]
    h1_vars_biases = [var for var in t_vars if 'hidden_1' in var.name and 'biases' in var.name]
    h2_vars_weight = [var for var in t_vars if 'hidden_2' in var.name and 'weights' in var.name]
    h2_vars_biases = [var for var in t_vars if 'hidden_2' in var.name and 'biases' in var.name]
    o1_vars_weight = [var for var in t_vars if 'output_1' in var.name and 'weights' in var.name]
    o1_vars_biases = [var for var in t_vars if 'output_1' in var.name and 'biases' in var.name]
    o2_vars_weight = [var for var in t_vars if 'output_2' in var.name and 'weights' in var.name]
    o2_vars_biases = [var for var in t_vars if 'output_2' in var.name and 'biases' in var.name]
    h1_vars_biases = tf.reshape(h1_vars_biases[0],[1,FLAGS.hidden_size_1])
    h2_vars_biases = tf.reshape(h2_vars_biases[0],[1,FLAGS.hidden_size_2])
    o1_vars_biases = tf.reshape(o1_vars_biases[0],[1,FLAGS.hidden_size_1])
    o2_vars_biases = tf.reshape(o2_vars_biases[0],[1,vocab_size])
    vars_1 = tf.concat((h1_vars_weight[0],h1_vars_biases),axis=0)
    vars_2 = tf.concat((h2_vars_weight[0],h2_vars_biases),axis=0)
    vars_3 = tf.concat((o1_vars_weight[0],o1_vars_biases),axis=0)
    vars_4 = tf.concat((o2_vars_weight[0],o2_vars_biases),axis=0)
    return [vars_1,vars_2,vars_3,vars_4]
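
Each tf.concat here stitches a layer's bias row onto its weight matrix along axis 0, so every layer travels as a single [input_dim + 1, output_dim] block. The stitch in isolation, with made-up dimensions:

w = tf.zeros([1024, 512])  # weights: [input_dim, output_dim]
b = tf.zeros([1, 512])     # bias reshaped to a single row
block = tf.concat((w, b), axis=0)  # shape [1025, 512]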
Project: youtube-8m    Author: wangheda    | Project source | File source
def get_forward_parameters(vocab_size=4716):
    t_vars = tf.trainable_variables()
    h1_vars_weight = [var for var in t_vars if 'hidden_1' in var.name and 'weights' in var.name]
    h1_vars_biases = [var for var in t_vars if 'hidden_1' in var.name and 'biases' in var.name]
    h2_vars_weight = [var for var in t_vars if 'hidden_2' in var.name and 'weights' in var.name]
    h2_vars_biases = [var for var in t_vars if 'hidden_2' in var.name and 'biases' in var.name]
    o1_vars_weight = [var for var in t_vars if 'output_1' in var.name and 'weights' in var.name]
    o1_vars_biases = [var for var in t_vars if 'output_1' in var.name and 'biases' in var.name]
    o2_vars_weight = [var for var in t_vars if 'output_2' in var.name and 'weights' in var.name]
    o2_vars_biases = [var for var in t_vars if 'output_2' in var.name and 'biases' in var.name]
    h1_vars_biases = tf.reshape(h1_vars_biases[0],[1,FLAGS.hidden_size_1])
    h2_vars_biases = tf.reshape(h2_vars_biases[0],[1,FLAGS.hidden_size_2])
    o1_vars_biases = tf.reshape(o1_vars_biases[0],[1,FLAGS.hidden_size_1])
    o2_vars_biases = tf.reshape(o2_vars_biases[0],[1,vocab_size])
    vars_1 = tf.concat((h1_vars_weight[0],h1_vars_biases),axis=0)
    vars_2 = tf.concat((h2_vars_weight[0],h2_vars_biases),axis=0)
    vars_3 = tf.concat((o1_vars_weight[0],o1_vars_biases),axis=0)
    vars_4 = tf.concat((o2_vars_weight[0],o2_vars_biases),axis=0)
    return [vars_1,vars_2,vars_3,vars_4]
Project: youtube-8m    Author: wangheda    | Project source | File source
def calculate_loss(self, predictions, labels, **unused_params):
        bound = FLAGS.softmax_bound
        vocab_size_1 = bound
        with tf.name_scope("loss_softmax"):
            epsilon = 10e-8
            float_labels = tf.cast(labels, tf.float32)
            labels_1 = float_labels[:,:vocab_size_1]
            predictions_1 = predictions[:,:vocab_size_1]
            cross_entropy_loss = CrossEntropyLoss().calculate_loss(predictions_1,labels_1)
            lables_2 = float_labels[:,vocab_size_1:]
            predictions_2 = predictions[:,vocab_size_1:]
            # l1 normalization (labels are no less than 0)
            label_rowsum = tf.maximum(
                tf.reduce_sum(lables_2, 1, keep_dims=True),
                epsilon)
            label_append = 1.0-tf.reduce_max(lables_2, 1, keep_dims=True)
            norm_float_labels = tf.concat((tf.div(lables_2, label_rowsum),label_append),axis=1)
            predictions_append = 1.0-tf.reduce_sum(predictions_2, 1, keep_dims=True)
            softmax_outputs = tf.concat((predictions_2,predictions_append),axis=1)
            softmax_loss = norm_float_labels * tf.log(softmax_outputs + epsilon) + (
                1 - norm_float_labels) * tf.log(1 - softmax_outputs + epsilon)
            softmax_loss = tf.negative(tf.reduce_sum(softmax_loss, 1))
        return tf.reduce_mean(softmax_loss) + cross_entropy_loss
Project: youtube-8m    Author: wangheda    | Project source | File source
def create_model(self, model_input, vocab_size, num_mixtures=None,
                   l2_penalty=1e-8, sub_scope="", original_input=None, **unused_params):
    num_supports = FLAGS.num_supports
    num_layers = FLAGS.hidden_chain_layers
    relu_cells = FLAGS.hidden_chain_relu_cells

    next_input = model_input
    support_predictions = []
    for layer in xrange(num_layers):
      sub_relu = slim.fully_connected(
          next_input,
          relu_cells,
          activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=sub_scope+"relu-%d"%layer)
      sub_prediction = self.sub_model(sub_relu, vocab_size, sub_scope=sub_scope+"prediction-%d"%layer)
      relu_norm = tf.nn.l2_normalize(sub_relu, dim=1)
      next_input = tf.concat([next_input, relu_norm], axis=1)
      support_predictions.append(sub_prediction)
    main_predictions = self.sub_model(next_input, vocab_size, sub_scope=sub_scope+"-main")
    support_predictions = tf.concat(support_predictions, axis=1)
    return {"predictions": main_predictions, "support_predictions": support_predictions}
Project: youtube-8m    Author: wangheda    | Project source | File source
def shift(self, 
          model_input, 
          shift_width,
          **unused_params):
    max_frames = model_input.get_shape().as_list()[1]
    num_features = model_input.get_shape().as_list()[2]

    shift_inputs = []
    for i in xrange(shift_width):
      if i == 0:
        shift_inputs.append(model_input)
      else:
        shift_inputs.append(tf.pad(model_input, paddings=[[0,0],[i,0],[0,0]])[:,:max_frames,:])

    shift_output = tf.concat(shift_inputs, axis=2)
    return shift_output
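
Each shifted copy is built by zero-padding the front of the frame axis and cropping back to max_frames, so copy i is the input delayed by i frames; the copies are then concatenated along the feature axis, giving shift_width * num_features features per frame. The padding trick in isolation, with made-up shapes:

x = tf.placeholder(tf.float32, [None, 300, 1024])
delayed = tf.pad(x, paddings=[[0, 0], [2, 0], [0, 0]])[:, :300, :]  # the input delayed by 2 frames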
Project: youtube-8m    Author: wangheda    | Project source | File source
def sub_lstm(self, model_input, num_frames, lstm_size, number_of_layers, sub_scope=""):
    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=True)
                for _ in range(number_of_layers)
                ],
            state_is_tuple=True)

    loss = 0.0
    with tf.variable_scope(sub_scope+"-RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames, 
                                         swap_memory=FLAGS.rnn_swap_memory,
                                         dtype=tf.float32)
      final_state = tf.concat(map(lambda x: x.c, state), axis = 1)
    return final_state
Project: youtube-8m    Author: wangheda    | Project source | File source
def sub_lstm(self, model_input, num_frames, lstm_size, number_of_layers, sub_scope=""):
    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=True)
                for _ in range(number_of_layers)
                ],
            state_is_tuple=True)

    loss = 0.0
    with tf.variable_scope(sub_scope+"-RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames, 
                                         swap_memory=FLAGS.rnn_swap_memory,
                                         dtype=tf.float32)
      final_state = tf.concat(map(lambda x: x.c, state), axis = 1)
    return final_state
Project: youtube-8m    Author: wangheda    | Project source | File source
def augment(self, model_input_raw, num_frames, labels_batch, **unused_params):
    assert FLAGS.frame_feature, \
        "AugmentationTransformer only works with frame feature"
    feature_dim = len(model_input_raw.get_shape()) - 1
    frame_dim = len(model_input_raw.get_shape()) - 2
    max_frame = model_input_raw.get_shape().as_list()[frame_dim]

    limit = tf.cast(tf.reduce_min(num_frames) / 4.0, tf.int32)
    offset = tf.random_uniform(shape=[], maxval=limit, dtype=tf.int32)
    input_trans1 = tf.pad(model_input_raw[:,offset:,:], paddings=[[0,0],[0,offset],[0,0]])
    num_frames_trans1 = num_frames - offset
    num_frames_trans1 = tf.cast(
                tf.random_uniform(shape=num_frames.shape, minval=0.75, maxval=1.0, 
                                  dtype=tf.float32) 
                * num_frames_trans1, tf.int32)
    model_input = tf.concat([model_input_raw, input_trans1], axis=0)
    labels_batch = tf.concat([labels_batch, labels_batch], axis=0)
    num_frames = tf.concat([num_frames, num_frames_trans1], axis=0)
    return model_input, labels_batch, num_frames
Project: youtube-8m    Author: wangheda    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
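
resize_axis truncates or zero-pads one axis to an exact size: tf.slice keeps at most new_size entries, tf.fill supplies any missing ones, and tf.concat joins the two pieces along the target axis. A hypothetical call:

frames = tf.placeholder(tf.float32, [None, None, 1024])
fixed = resize_axis(frames, axis=1, new_size=300)  # static shape becomes [None, 300, 1024]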
Project: youtube-8m    Author: wangheda    | Project source | File source
def prepare_reader(self, filename_queue, batch_size=1024):

    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)

    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes))

    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)

    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)

    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def q_value(q_dist, num_atoms, num_actions, V_max, delta_z):
    V_min = -V_max
    start = V_min
    end = V_max + delta_z
    delta = delta_z
    z = tf.range(start, end, delta)

    q_as = []

    for action in range(num_actions):
        dist = q_dist[:, num_atoms*action: num_atoms*(action+1)]
        q_a = tf.reduce_sum(tf.multiply(dist, z), axis = 1, keep_dims = True)
        q_as.append(q_a)

    q_values = tf.concat(q_as, axis=1)

    return q_values
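
Each action's slice of the distribution is reduced to an expected value over the support z = V_min, V_min + delta_z, ..., V_max, and the per-action values are concatenated into a [batch, num_actions] tensor. A hypothetical call with a C51-style setup:

num_actions, num_atoms, V_max = 6, 51, 10.0
delta_z = 2.0 * V_max / (num_atoms - 1)  # 51 atoms spanning [-10, 10]
q_dist = tf.placeholder(tf.float32, [None, num_actions * num_atoms])
q_values = q_value(q_dist, num_atoms, num_actions, V_max, delta_z)  # shape [None, 6]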
Project: ICGan-tensorflow    Author: zhangqianhui    | Project source | File source
def discriminate(self, x_var, y, weights, biases, reuse=False):

        y1 = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        x_var = conv_cond_concat(x_var, y1)

        conv1 = lrelu(conv2d(x_var, weights['wc1'], biases['bc1']))

        conv1 = conv_cond_concat(conv1, y1)

        conv2 = lrelu(batch_normal(conv2d(conv1, weights['wc2'], biases['bc2']), scope='dis_bn1', reuse=reuse))

        conv2 = tf.reshape(conv2, [self.batch_size, -1])

        conv2 = tf.concat([conv2, y], 1)

        fc1 = lrelu(batch_normal(fully_connect(conv2, weights['wc3'], biases['bc3']), scope='dis_bn2', reuse=reuse))

        fc1 = tf.concat([fc1, y], 1)
        # for D
        output = fully_connect(fc1, weights['wd'], biases['bd'])

        return tf.nn.sigmoid(output)
Project: seq2seq    Author: google    | Project source | File source
def _create(self):
    # Concat bridge inputs on the depth dimensions
    bridge_input = nest.map_structure(
        lambda x: tf.reshape(x, [self.batch_size, _total_tensor_depth(x)]),
        self._bridge_input)
    bridge_input_flat = nest.flatten([bridge_input])
    bridge_input_concat = tf.concat(bridge_input_flat, 1)

    state_size_splits = nest.flatten(self.decoder_state_size)
    total_decoder_state_size = sum(state_size_splits)

    # Pass bridge inputs through a fully connected layer
    initial_state_flat = tf.contrib.layers.fully_connected(
        inputs=bridge_input_concat,
        num_outputs=total_decoder_state_size,
        activation_fn=self._activation_fn)

    # Shape back into required state size
    initial_state = tf.split(initial_state_flat, state_size_splits, axis=1)
    return nest.pack_sequence_as(self.decoder_state_size, initial_state)
Project: seq2seq    Author: google    | Project source | File source
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
Project: seq2seq    Author: google    | Project source | File source
def decode(self, data, items):
    decoded_items = {}

    # Split tokens
    tokens = tf.string_split([data], delimiter=self.delimiter).values

    # Optionally prepend a special token
    if self.prepend_token is not None:
      tokens = tf.concat([[self.prepend_token], tokens], 0)

    # Optionally append a special token
    if self.append_token is not None:
      tokens = tf.concat([tokens, [self.append_token]], 0)

    decoded_items[self.length_feature_name] = tf.size(tokens)
    decoded_items[self.tokens_feature_name] = tokens
    return [decoded_items[_] for _ in items]
Project: seq2seq    Author: google    | Project source | File source
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)

    # Concatenate outputs and states of the forward and backward RNNs
    outputs_concat = tf.concat(outputs, 2)

    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def gray2jet(x):
    ''' NHWC (channel last) format '''
    with tf.name_scope('Gray2Jet'):
        r = clip_to_boundary(
            line(x, .3515, .66, 0., 1.),
            line(x, .8867, 1., 1., .5),
            minval=0.,
            maxval=1.,
        )
        g = clip_to_boundary(
            line(x, .125, .375, 0., 1.),
            line(x, .64, .91, 1., 0.),
            minval=0.,
            maxval=1.,
        )
        b = clip_to_boundary(
            line(x, .0, .1132, 0.5, 1.0),
            line(x, .34, .648, 1., 0.),
            minval=0.,
            maxval=1.,
        )
        return tf.concat([r, g, b], axis=-1)
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def _generator(self, z, y, is_training):
        '''
        Input:
            z: shape=[b, c]
            y: speaker label; shape=[b,], dtype=int64
        Return:
            xh: reconstructed version of `x` (the input to the VAE)
        '''
        self.speaker_repr = self._l2_regularized_embedding(
            n_class=self.arch['y_dim'],
            h_dim=self.arch['yemb_dim'],
            scope_name='y_embedding',
            var_name='y_emb'
        )

        c = tf.nn.embedding_lookup(self.speaker_repr, y)
        x = tf.concat([z, c], -1)
        for o in self.arch['decoder']['output']:
            x = tf.layers.dense(x, units=o, activation=lrelu)            
            # x = tf.layers.batch_normalization(x, training=is_training)
        return tf.layers.dense(x, units=self.arch['x_dim'], name='xh')
Project: HandDetection    Author: YunqiuXu    | Project source | File source
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Won't be back-propagated to rois anyway, but to save time
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
Project: hdrnet_legacy    Author: mgharbi    | Project source | File source
def bilateral_slice(grid, guide, name=None):
  """Slices into a bilateral grid using the guide map.

  Args:
    grid: (Tensor) [batch_size, grid_h, grid_w, depth, n_outputs]
      grid to slice from.
    guide: (Tensor) [batch_size, h, w ] guide map to slice along.
    name: (string) name for the operation.
  Returns:
    sliced: (Tensor) [batch_size, h, w, n_outputs] sliced output.
  """

  with tf.name_scope(name):
    gridshape = grid.get_shape().as_list()
    if len(gridshape) == 6:
      _, _, _, _, n_out, n_in = gridshape
      grid = tf.concat(tf.unstack(grid, None, axis=5), 4)

    sliced = hdrnet_ops.bilateral_slice(grid, guide)

    if len(gridshape) == 6:
      sliced = tf.stack(tf.split(sliced, n_in, axis=3), axis=4)
    return sliced
# pylint: enable=redefined-builtin
Project: Renewables_Scenario_Gen_GAN    Author: chennnnnyize    | Project source | File source
def discriminate(self, image, Y):
        print("Initializing the discriminator")
        print("Y shape", Y.get_shape())
        yb = tf.reshape(Y, tf.stack([self.batch_size, 1, 1, self.dim_y]))
        print("image shape", image.get_shape())
        print("yb shape", yb.get_shape())
        X = tf.concat([image, yb * tf.ones([self.batch_size, 24, 24, self.dim_y])],3)
        print("X shape", X.get_shape())
        h1 = lrelu( tf.nn.conv2d( X, self.discrim_W1, strides=[1,2,2,1], padding='SAME' ))
        print("h1 shape", h1.get_shape())
        h1 = tf.concat([h1, yb * tf.ones([self.batch_size, 12, 12, self.dim_y])],3)
        print("h1 shape", h1.get_shape())
        h2 = lrelu(batchnormalize( tf.nn.conv2d( h1, self.discrim_W2, strides=[1,2,2,1], padding='SAME')) )
        print("h2 shape", h2.get_shape())
        h2 = tf.reshape(h2, [self.batch_size, -1])
        h2 = tf.concat([h2, Y], 1)
        discri=tf.matmul(h2, self.discrim_W3 )
        print("discri shape", discri.get_shape())
        h3 = lrelu(batchnormalize(discri))
        return h3
Project: Renewables_Scenario_Gen_GAN    Author: chennnnnyize    | Project source | File source
def samples_generator(self, batch_size):
        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
        Y = tf.placeholder(tf.float32, [batch_size, self.dim_y])

        yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])
        Z_ = tf.concat([Z,Y], 1)
        h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))
        h1 = tf.concat([h1, Y], 1)
        h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))
        h2 = tf.reshape(h2, [batch_size,6,6,self.dim_W2])
        h2 = tf.concat([h2, yb*tf.ones([batch_size, 6,6, self.dim_y])], 3)

        output_shape_l3 = [batch_size,12,12,self.dim_W3]
        h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])
        h3 = tf.nn.relu( batchnormalize(h3) )
        h3 = tf.concat([h3, yb*tf.ones([batch_size, 12,12,self.dim_y])], 3)

        output_shape_l4 = [batch_size,24,24,self.dim_channel]
        h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])
        x = tf.nn.sigmoid(h4)
        return Z, Y, x
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project source | File source
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 self.center_map],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project source | File source
def _middle_conv(self, stage):
        with tf.variable_scope('stage_' + str(stage)):
            self.current_featuremap = tf.concat([self.stage_heatmap[stage-2],
                                                 self.sub_stage_img_feature,
                                                 # self.center_map,
                                                 ],
                                                axis=3)
            with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                weights_initializer=tf.contrib.layers.xavier_initializer()):
                mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
                mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
                mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
                self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1],
                                                   scope='mid_conv7')
                self.stage_heatmap.append(self.current_heatmap)
项目:CausalGAN    作者:mkocaoglu    | 项目源码 | 文件源码
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    #print('input x:',x.get_shape().as_list())
    #print('input y:',y.get_shape().as_list())

    xshape=x.get_shape()
    #tile by [1,64,64,1]

    tile_shape=tf.stack([1,xshape[1],xshape[2],1])
    tile_y=tf.tile(y,tile_shape)

    #print('tile y:',tile_y.get_shape().as_list())

    return tf.concat([x,tile_y],axis=3)


    #x_shapes = x.get_shape()
    #y_shapes = y.get_shape()
    #return tf.concat([
    #x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
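
A hypothetical call with made-up shapes: the conditioning tensor is tiled across every spatial position and appended along the channel axis.

x = tf.zeros([16, 64, 64, 128])  # feature map, NHWC
y = tf.zeros([16, 1, 1, 10])     # conditioning vector, one per example
out = conv_cond_concat(x, y)     # shape [16, 64, 64, 138]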
Project: CausalGAN    Author: mkocaoglu    | Project source | File source
def setup_tensor(self):
        if self._label is not None:#already setup
            if debug:
                #Notify that already setup (normal behavior)
                print('self.',self.name,' has refuted setting up tensor')
            return

        tf_parents=[self.z]+[node.label for node in self.parents]


        with tf.variable_scope(self.name) as vs:
            h=tf.concat(tf_parents,-1)#tensor of parent values
            for l in range(self.n_layers-1):
                h=slim.fully_connected(h,self.n_hidden,activation_fn=lrelu,scope='layer'+str(l))

            self._label_logit = slim.fully_connected(h,1,activation_fn=None,scope='proj')
            self._label=tf.nn.sigmoid( self._label_logit )
            if debug:
                print('self.',self.name,' has setup _label=',self._label)

            #There could actually be some (quiet) error here I think if one of the
            #names in the causal graph is a substring of some other name.
                #e.g. 'hair' and 'black_hair'
            #Sorry, not coded to anticipate corner case
            self.setup_var=tf.contrib.framework.get_variables(vs)
Project: kaggle-review    Author: daxiongshu    | Project source | File source
def rgb_to_bgr(self, inputs):
    VGG_MEAN = [103.939, 116.779, 123.68]
    try:
        red, green, blue = tf.split(inputs, 3, 3)
    except:
        red, green, blue = tf.split(3, 3, inputs)
    #assert red.get_shape().as_list()[1:] == [224, 224, 1]
    #assert green.get_shape().as_list()[1:] == [224, 224, 1]
    #assert blue.get_shape().as_list()[1:] == [224, 224, 1]
    try:
        bgr = tf.concat([
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2]], axis=3)
    except:
        bgr = tf.concat(3, [
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2]])
    return bgr
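
The try/except pair lets the snippet run on both sides of the TF 1.0 argument-order change. The same bridging idea as a standalone helper, a sketch rather than an official API:

def concat_compat(values, axis):
    # TF >= 1.0 takes (values, axis); releases before 1.0 took (axis, values).
    try:
        return tf.concat(values, axis)
    except Exception:
        return tf.concat(axis, values)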
Project: kaggle-review    Author: daxiongshu    | Project source | File source
def _ZF_up_block(self,net, down, ksizes,filters,dropout,keep_prob,name,activations,strides,batchnorm):
        channels = net.get_shape().as_list()[-1]
        with tf.variable_scope(name.split('/')[-1]):
            net = self._deconv2D(net, ksize=2, in_channel=channels, 
                out_channel=channels, strides=[1,2,2,1], layer_name="%s/deconv"%(name), 
                padding='SAME', activation=None, L2 = 1)

            try:
                net = tf.concat([net,down],axis=3)
            except:
                net = tf.concat(3, [net,down])

            net = self.conv_block(net, "%s/conv_block"%(name), ksizes=ksizes, filters=filters,
                activations=activations, strides=strides, batchnorm=batchnorm)

            if dropout:
                net = tf.nn.dropout(net, keep_prob = self.keep_prob)

        return net
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def pad_up_to(vector, size, rank):
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0,0]*(rank-1)], axis=0), shape=((rank+1), 2))
        return tf.pad(vector, padding, mode='constant')
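
pad_up_to builds a (rank + 1) x 2 paddings matrix whose only nonzero entry sits on axis 1, so the tensor is right-padded with zeros along that axis up to the requested size. A hypothetical call on a [batch, time, dim] tensor (rank counts the axes after the batch axis):

seq = tf.placeholder(tf.float32, [None, None, 128])
padded = pad_up_to(seq, size=50, rank=2)  # time axis padded with zeros up to 50 steps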
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def encode(self, inputs, input_length, _parses):
        with tf.name_scope('BiLSTMEncoder'):
            fw_cell_enc = tf.contrib.rnn.MultiRNNCell([self._make_rnn_cell(i) for i in range(self._num_layers)])
            bw_cell_enc = tf.contrib.rnn.MultiRNNCell([self._make_rnn_cell(i) for i in range(self._num_layers)])

            outputs, output_state = tf.nn.bidirectional_dynamic_rnn(fw_cell_enc, bw_cell_enc, inputs, input_length,
                                                                    dtype=tf.float32)

            fw_output_state, bw_output_state = output_state
            # concat each element of the final state, so that we're compatible with a unidirectional
            # decoder
            output_state = nest.pack_sequence_as(fw_output_state, [tf.concat((x, y), axis=1) for x, y in zip(nest.flatten(fw_output_state), nest.flatten(bw_output_state))])

            return tf.concat(outputs, axis=2), output_state
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def _merge_batch_beams(self, t, s):
        """Merges the tensor from a batch of beams into a batch by beams.
        More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
        reshape this into [batch_size*beam_width, s]
        Args:
          t: Tensor of dimension [batch_size, beam_width, s]
        Returns:
          A reshaped version of t with dimension [batch_size * beam_width, s].
        """
        t_shape = tf.shape(t)
        reshaped = tf.reshape(t, tf.concat(([self._batch_size * self._beam_width], t_shape[2:]), axis=0))
        reshaped.set_shape(tf.TensorShape([None]).concatenate(s))
        return reshaped
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def _split_batch_beams(self, t, s):
        """Splits the tensor from a batch by beams into a batch of beams.
        More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
        reshape this into [batch_size, beam_width, s]
        Args:
          t: Tensor of dimension [batch_size*beam_width, s].
          s: (Possibly known) depth shape.
        Returns:
          A reshaped version of t with dimension [batch_size, beam_width, s].
        Raises:
          ValueError: If, after reshaping, the new tensor is not shaped
            `[batch_size, beam_width, s]` (assuming batch_size and beam_width
            are known statically).
        """
        t_shape = tf.shape(t)
        reshaped = tf.reshape(t, tf.concat(([self._batch_size, self._beam_width], t_shape[1:]), axis=0))
        reshaped.set_shape(tf.TensorShape([None, self._beam_width]).concatenate(t.shape[1:]))
        expected_reshaped_shape = tf.TensorShape([None, self._beam_width]).concatenate(s)
        if not reshaped.shape.is_compatible_with(expected_reshaped_shape):
            raise ValueError("Unexpected behavior when reshaping between beam width "
                             "and batch size.  The reshaped tensor has shape: %s.  "
                             "We expected it to have shape "
                             "(batch_size, beam_width, depth) == %s.  Perhaps you "
                             "forgot to create a zero_state with "
                             "batch_size=encoder_batch_size * beam_width?"
                             % (reshaped.shape, expected_reshaped_shape))
        return reshaped
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def pad_up_to(vector, size):
    rank = vector.get_shape().ndims - 1

    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1,))
    with tf.control_dependencies([tf.assert_non_negative(length_diff, data=(vector, size, tf.shape(vector)))]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0,0]*(rank-1)], axis=0), shape=((rank+1), 2))
        return tf.pad(vector, padding, mode='constant')
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def __init__(self, wrapped : tf.contrib.rnn.RNNCell, parent_state):
        super().__init__()
        self._wrapped = wrapped
        self._flat_parent_state = tf.concat(nest.flatten(parent_state), axis=1)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def call(self, input, state):
        concat_input = tf.concat((self._flat_parent_state, input), axis=1)
        return self._wrapped.call(concat_input, state)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def __init__(self, wrapped : tf.contrib.rnn.RNNCell, constant_input):
        super().__init__()
        self._wrapped = wrapped
        self._flat_constant_input = tf.concat(nest.flatten(constant_input), axis=1)
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | Project source | File source
def average_gradients(tower_gradients):
    r'''
    A routine that averages, across all towers, the gradients computed for each
    variable. Note also that this code acts as a synchronization point, as it
    requires all GPUs to be finished with their mini-batch before it can run
    to completion.
    '''
    # List of average gradients to return to the caller
    average_grads = []

    # Loop over gradient/variable pairs from all towers
    for grad_and_vars in zip(*tower_gradients):
        # Introduce grads to store the gradients for the current variable
        grads = []

        # Loop over the gradients for the current variable
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        # Create a gradient/variable tuple for the current variable with its average gradient
        grad_and_var = (grad, grad_and_vars[0][1])

        # Add the current tuple to average_grads
        average_grads.append(grad_and_var)

    # Return result to caller
    return average_grads
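
A hypothetical multi-tower setup: each tower computes (gradient, variable) pairs on its own device, and the averaged pairs feed apply_gradients. The per-tower losses here stand in for whatever each tower computes.

opt = tf.train.GradientDescentOptimizer(0.1)
tower_gradients = [
    opt.compute_gradients(tower_loss_gpu0),  # hypothetical per-tower losses
    opt.compute_gradients(tower_loss_gpu1),
]
train_op = opt.apply_gradients(average_gradients(tower_gradients))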



# Logging
# =======