Python tensorflow module: uniform_unit_scaling_initializer() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.uniform_unit_scaling_initializer().
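All snippets below target the TF 1.x API and assume import tensorflow as tf has already been run. Note that tf.uniform_unit_scaling_initializer was deprecated during the 1.x series and removed in TF 2.x (tf.initializers.variance_scaling with a uniform distribution is the documented replacement). As a minimal orientation sketch, with illustrative shapes:

import tensorflow as tf

# Samples w uniformly from [-limit, limit] with limit = factor * sqrt(3 / input_dim),
# so that matmul(x, w) roughly preserves the scale of its input.
init = tf.uniform_unit_scaling_initializer(factor=1.0)
w = tf.get_variable('w', shape=[256, 128], initializer=init)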

Project: l3    Author: jacobandreas    | Project source | File source
def _linear(t_in, n_out):
    v_w = tf.get_variable(
            "w",
            shape=(t_in.get_shape()[-1], n_out),
            initializer=tf.uniform_unit_scaling_initializer(
                factor=INIT_SCALE))
    v_b = tf.get_variable(
            "b",
            shape=n_out,
            initializer=tf.constant_initializer(0))
    if len(t_in.get_shape()) == 2:
        return tf.einsum("ij,jk->ik", t_in, v_w) + v_b
    elif len(t_in.get_shape()) == 3:
        return tf.einsum("ijk,kl->ijl", t_in, v_w) + v_b
    else:
        assert False
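A usage sketch for the helper above; INIT_SCALE and all shapes are assumptions (the l3 project defines INIT_SCALE elsewhere):

INIT_SCALE = 1.0  # assumed; configured elsewhere in the project

x2 = tf.placeholder(tf.float32, [8, 64])      # rank-2 input -> einsum "ij,jk->ik"
x3 = tf.placeholder(tf.float32, [8, 10, 64])  # rank-3 input -> einsum "ijk,kl->ijl"
with tf.variable_scope('fc2'):
    y2 = _linear(x2, 32)   # shape [8, 32]
with tf.variable_scope('fc3'):
    y3 = _linear(x3, 32)   # shape [8, 10, 32]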
Project: deep_separation_contraction    Author: edouardoyallon    | Project source | File source
def net(x, n_layer_per_block, n_classes, phase_train, alpha, number_channel, scope='deep_net'):
  with tf.variable_scope(scope):
    n1 = number_channel
    n2 = number_channel
    n3 = number_channel
    n4 = number_channel

    y = conv2d(x, 3, n1, 3, 1, 'SAME', False, phase_train, scope='conv_init')
    y = batch_norm(y, n1, phase_train, scope='bn_init')
    y = tf.nn.relu(y, name='relu_init')

    y = group(y, n1, n2, n_layer_per_block, False, alpha, phase_train, scope='group_1')
    y = group(y, n2, n3, n_layer_per_block, True, alpha, phase_train, scope='group_2')
    y = group(y, n3, n4, n_layer_per_block, True, alpha, phase_train, scope='group_3')

    y = tf.nn.avg_pool(y, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID', name='avg_pool')
    y = tf.squeeze(y, squeeze_dims=[1, 2])

    w = tf.get_variable('DW', [n4, n_classes],
                        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    tf.add_to_collection('weights', w)
    bias = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
    y = tf.nn.xw_plus_b(y, w, bias)
  return y
Project: deep_separation_contraction    Author: edouardoyallon    | Project source | File source
def net(x, n_layer_per_block, n_classes, phase_train, number_channel, scope='deep_net'):
  with tf.variable_scope(scope):
    n1 = number_channel
    n2 = number_channel
    n3 = number_channel
    n4 = number_channel

    y = conv2d(x, 3, n1, 3, 1, 'SAME', False, phase_train, scope='conv_init')
    y = batch_norm(y, n1, phase_train, scope='bn_init')
    y = tf.nn.relu(y, name='relu_init')

    y = group(y, n1, n2, n_layer_per_block, False, phase_train, scope='group_1')
    y = group(y, n2, n3, n_layer_per_block, True, phase_train, scope='group_2')
    y = group(y, n3, n4, n_layer_per_block, True, phase_train, scope='group_3')

    y = tf.nn.avg_pool(y, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID', name='avg_pool')
    y = tf.squeeze(y, squeeze_dims=[1, 2])

    w = tf.get_variable('DW', [n4, n_classes],
                        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    tf.add_to_collection('weights', w)
    bias = tf.get_variable('bias', [n_classes], initializer=tf.constant_initializer(0.0))
    y = tf.nn.xw_plus_b(y, w, bias)
  return y
Project: ResNet-deeplabV3    Author: Harvey1973    | Project source | File source
def output_layer(input_layer, num_labels):
    '''
    param input_layer: flattened 2D tensor
    param num_labels: number of classes
    return: the output of the FC layer, y = Wx + b
    '''
    input_dim = input_layer.get_shape().as_list()[-1]
    fc_w = create_variables(name='fc_weight', shape=[input_dim, num_labels], is_fc_layer=True,
                            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    fc_b = create_variables(name='fc_bias', shape=[num_labels], is_fc_layer=False,
                            initializer=tf.zeros_initializer())
    output = tf.matmul(input_layer, fc_w) + fc_b
    return output
Project: CausalGAN    Author: mkocaoglu    | Project source | File source
def linear(input_, output_dim, scope=None, stddev=.7):
    unif = tf.uniform_unit_scaling_initializer()
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        #w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=unif)
        w = tf.get_variable('w', [input_.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input_, w) + b
Project: attend_infer_repeat    Author: akosiorek    | Project source | File source
def activation_based_init(nonlinearity):
    """Returns initialiaation based on a nonlinearlity"""

    init = tf.uniform_unit_scaling_initializer()
    if nonlinearity == tf.nn.relu:
        init = tf.contrib.layers.xavier_initializer()
    elif nonlinearity == tf.nn.elu:
        init = tf.contrib.layers.variance_scaling_initializer()
    elif nonlinearity == selu:
        init = tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN')

    return init
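A possible call site, as a sketch (x and the layer shape are assumptions; selu above refers to the project's own definition):

x = tf.placeholder(tf.float32, [None, 128])
w = tf.get_variable('w', shape=[128, 64],
                    initializer=activation_based_init(tf.nn.elu))
h = tf.nn.elu(tf.matmul(x, w))  # elu selects variance_scaling_initializer above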
Project: Face-Pose-Net    Author: fengju514    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    #print "*** ", x.get_shape()
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    #print "*** ", w.get_shape()
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    #print "*** ", b.get_shape()
    aaa = tf.nn.xw_plus_b(x, w, b)
    #print "*** ", aaa.get_shape()
    return tf.nn.xw_plus_b(x, w, b)
Project: Face-Pose-Net    Author: fengju514    | Project source | File source
def _fully_connected_ST(self, x, out_dim):
    """FullyConnected layer for final output of the localization network in the spatial transformer"""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW2', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    initial = np.array([[1., 0, 0], [0, 1., 0]])
    initial = initial.astype('float32')
    initial = initial.flatten()
    b = tf.get_variable('biases2', [out_dim],
                        initializer=tf.constant_initializer(initial))
    return tf.nn.xw_plus_b(x, w, b)
Project: lm    Author: rafaljozefowicz    | Project source | File source
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    if transposed:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=[shape[1], shape[0]])
    else:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype, full_shape=shape)
    return [tf.get_variable(name + "_%d" % i, [shard_size, shape[1]], initializer=initializer, dtype=dtype)
            for i in range(num_shards)]
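The returned list can be fed straight to tf.nn.embedding_lookup, which treats a list of variables as one partitioned table. A sketch, assuming the old TF release this project targets (the full_shape argument was removed from later versions) and that the default "mod" partition strategy matches the intended layout:

shards = sharded_variable('emb', [50000, 512], num_shards=4)
ids = tf.placeholder(tf.int64, [None])
emb = tf.nn.embedding_lookup(shards, ids)  # partition_strategy defaults to 'mod'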


# XXX(rafal): Code below copied from rnn_cell.py
Project: fold    Author: tensorflow    | Project source | File source
def tree_fc(self, left, right):
    # A simple tree RNN with a single fully connected layer.
    if self._weights is None:
      with tf.variable_scope(self._vscope):
        self._weights = tf.get_variable(
            "weights", [FLAGS.vector_size*2, FLAGS.vector_size],
            initializer=tf.uniform_unit_scaling_initializer(1.43))
        self._bias = tf.get_variable("bias", [FLAGS.vector_size],
                                     initializer=tf.zeros_initializer())
    x = tf.concat([left, right], 1)
    result = tf.add(tf.matmul(x, self._weights), self._bias)
    return tf.nn.relu(result)
Project: fold    Author: tensorflow    | Project source | File source
def tree_lstm(self, left, right):
    # A variation on the tree LSTM -- we add an extra hidden layer.
    if self._weights is None:
      with tf.variable_scope(self._vscope):
        self._weights_0 = tf.get_variable(
            "weights_0", [FLAGS.vector_size*2, FLAGS.vector_size],
            initializer=tf.uniform_unit_scaling_initializer(1.43))
        self._bias_0 = tf.get_variable("bias_0", [FLAGS.vector_size],
                                       initializer=tf.zeros_initializer())
        self._weights = tf.get_variable(
            "weights", [FLAGS.vector_size, FLAGS.vector_size*4],
            initializer=tf.uniform_unit_scaling_initializer(1.0))
        self._bias = tf.get_variable("bias", [FLAGS.vector_size*4],
                                     initializer=tf.zeros_initializer())
    # One hidden layer
    x = tf.concat([left, right], 1)
    h0 = tf.nn.relu(tf.add(tf.matmul(x, self._weights_0), self._bias_0))

    # Do a single matrix multiply to compute all gates
    h1 = tf.add(tf.matmul(h0, self._weights), self._bias)
    (hfl, hfr, hi, hg) = tf.split(h1, 4, axis=1)

    fl = tf.nn.sigmoid(hfl)  # forget left
    fr = tf.nn.sigmoid(hfr)  # forget right
    i = tf.nn.sigmoid(hi)    # input gate
    g = tf.nn.tanh(hg)       # computation

    ylr = tf.add(tf.multiply(fl, left), tf.multiply(fr, right))
    ygi = tf.multiply(i, g)
    y = tf.add(ylr, ygi)

    return y
Project: the-wavenet-pianist    Author: 821760408-sp    | Project source | File source
def _create_conv_layer(filter_width, in_channels, out_channels):
        kernel_shape = [filter_width,
                        in_channels,
                        out_channels]
        biases_shape = [out_channels]
        return {
            'weights': tf.get_variable(
                'weights',
                kernel_shape,
                initializer=tf.uniform_unit_scaling_initializer(1.0)),
            'biases': tf.get_variable(
                'biases',
                biases_shape,
                initializer=tf.constant_initializer(0.0))
        }
Project: deep_learning_study    Author: jowettcz    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: convseg    Author: chqiwang    | Project source | File source
def build_graph(self):
        parameters = self.parameters
        with tf.variable_scope(name_or_scope=self.scope, initializer=tf.uniform_unit_scaling_initializer()):
            seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(vocab_size=parameters['vocab_size'],
                                                                           emb_size=parameters['emb_size'],
                                                                           word_window_size=parameters['word_window_size'],
                                                                           word_vocab_size=parameters['word_vocab_size'],
                                                                           word_emb_size=parameters['word_emb_size'])
            stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
                self.build_tagging_graph(inputs=inputs,
                                         num_tags=parameters['num_tags'],
                                         use_crf=parameters['use_crf'],
                                         lamd=parameters['lamd'],
                                         dropout_emb=parameters['dropout_emb'],
                                         dropout_hidden=parameters['dropout_hidden'],
                                         hidden_layers=parameters['hidden_layers'],
                                         channels=parameters['channels'],
                                         kernel_size=parameters['kernel_size'],
                                         use_bn=parameters['use_bn'],
                                         use_wn=parameters['use_wn'],
                                         active_type=parameters['active_type'])
        self.seq_ids_pl = seq_ids_pl
        self.seq_other_ids_pls = seq_other_ids_pls
        self.stag_ids_pl = stag_ids_pl
        self.seq_lengths_pl = seq_lengths_pl
        self.is_train_pl = is_train_pl
        self.cost_op = cost_op
        self.train_cost_op = train_cost_op
        self.scores_op = scores_op
        self.summary_op = summary_op
Project: f-lm    Author: okuchaiev    | Project source | File source
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    if transposed:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    else:        
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + "_" + str(i), [shard_size, shape[1]],
                            initializer=initializer, dtype=dtype) for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py
Project: YellowFin    Author: JianGoForIt    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: l3    Author: jacobandreas    | Project source | File source
def _embed(t_in, n_embeddings, n_out):
    v = tf.get_variable(
            "embed", shape=(n_embeddings, n_out),
            initializer=tf.uniform_unit_scaling_initializer())
    t_embed = tf.nn.embedding_lookup(v, t_in)
    return t_embed
Project: l3    Author: jacobandreas    | Project source | File source
def _linear(t_in, n_out):
    assert len(t_in.get_shape()) == 2
    v_w = tf.get_variable(
            "w",
            shape=(t_in.get_shape()[1], n_out),
            initializer=tf.uniform_unit_scaling_initializer(
                factor=INIT_SCALE))
    v_b = tf.get_variable(
            "b",
            shape=n_out,
            initializer=tf.constant_initializer(0))
    return tf.einsum("ij,jk->ik", t_in, v_w) + v_b
Project: l3    Author: jacobandreas    | Project source | File source
def _embed(t_in, n_embeddings, n_out):
    v = tf.get_variable(
            "embed", shape=(n_embeddings, n_out),
            initializer=tf.uniform_unit_scaling_initializer())
    t_embed = tf.nn.embedding_lookup(v, t_in)
    return t_embed
Project: AM-GAN    Author: ZhimingZhou    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: InsuranceQA_zh    Author: l11x0m7    | Project source | File source
def add_embeddings(self):
        with tf.variable_scope('embedding'):
            embeddings = tf.get_variable('embeddings', shape=[self.config.vocab_size, self.config.embedding_size], initializer=tf.uniform_unit_scaling_initializer())
            q_embed = tf.nn.embedding_lookup(embeddings, self.q)
            aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
            aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
            return q_embed, aplus_embed, aminus_embed

    # Hidden Layer
Project: InsuranceQA_zh    Author: l11x0m7    | Project source | File source
def add_hl(self, q_embed, aplus_embed, aminus_embed):
        with tf.variable_scope('HL'):
            W = tf.get_variable('weights', shape=[self.config.embedding_size, self.config.hidden_size], initializer=tf.uniform_unit_scaling_initializer())
            b = tf.get_variable('biases', initializer=tf.constant(0.1, shape=[self.config.hidden_size]))
            h_q = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(q_embed, [-1, self.config.embedding_size]), W) + b),
                             [-1, self.config.sequence_length, self.config.hidden_size])
            h_ap = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aplus_embed, [-1, self.config.embedding_size]), W) + b),
                              [-1, self.config.sequence_length, self.config.hidden_size])
            h_am = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aminus_embed, [-1, self.config.embedding_size]), W) + b),
                              [-1, self.config.sequence_length, self.config.hidden_size])
            tf.add_to_collection('total_loss', 0.5*self.config.l2_reg_lambda*tf.nn.l2_loss(W))
            return h_q, h_ap, h_am

    # CNN layer
Project: hart    Author: akosiorek    | Project source | File source
def _build(self):
        n_inpt_channels = self.inpt.get_shape().as_list()[-1]
        n_dfn_filter_params = n_inpt_channels * self.n_channels * np.prod(self.ksize)

        filter_inpt = self.filter_inpt
        for i in xrange(1, self.n_param_layers):
            filter_inpt = AffineLayer(filter_inpt, filter_inpt.get_shape().as_list()[-1],
                                      transfer=tf.nn.elu, name='param_layer_{}'.format(i))

        dfn_weight_init = tf.uniform_unit_scaling_initializer(self.dfn_weight_factor)
        self.dynamic_weights = AffineLayer(filter_inpt, n_dfn_filter_params, transfer=None,
                                           weight_init=dfn_weight_init, bias_init=dfn_weight_init, name='dynamic_weights')

        dfn_weights = tf.reshape(self.dynamic_weights, (-1, 1, 1, n_dfn_filter_params))
        dfn = DynamicFilterConvLayer(self.inpt, dfn_weights, self.ksize, name='dfn')

        if self.adaptive_bias:
            dfn_bias_init = tf.uniform_unit_scaling_initializer(self.dfn_bias_factor)
            self.dynamic_bias = AffineLayer(filter_inpt, self.n_channels, transfer=None,
                                            weight_init=dfn_bias_init, bias_init=dfn_bias_init,
                                            name='dynamic_bias')

            dfn_adaptive_bias = tf.reshape(self.dynamic_bias, (-1, 1, 1, self.n_channels))
            dfn += dfn_adaptive_bias

        if self.bias:
            self.bias = tf.get_variable('dfn_bias', (1, 1, 1, self.n_channels))
            dfn += self.bias

        self.features = self.transfer(dfn)
Project: TF-deeplab    Author: chenxi116    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.batch_size, -1])
    w = tf.get_variable(
        'DW', [self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: TF-deeplab    Author: chenxi116    | Project source | File source
def _fully_convolutional(self, x, out_dim):
    """FullyConvolutional layer for final output."""
    w = tf.get_variable(
        'DW', [1, 1, self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())  
    return tf.nn.conv2d(x, w, self._stride_arr(1), padding='SAME') + b
Project: DeepLab    Author: 2prime    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: ran    Author: kentonl    | Project source | File source
def sharded_variable(name, shape, num_shards, dtype=tf.float32, transposed=False):
    # The final size of the sharded variable may be larger than requested.
    # This should be fine for embeddings.
    shard_size = int((shape[0] + num_shards - 1) / num_shards)
    if transposed:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    else:
        initializer = tf.uniform_unit_scaling_initializer(dtype=dtype)
    return [tf.get_variable(name + "_%d" % i, [shard_size, shape[1]], initializer=initializer, dtype=dtype)
            for i in range(num_shards)]


# XXX(rafal): Code below copied from rnn_cell.py
Project: TF-Speech-Recognition    Author: ZhishengWang    | Project source | File source
def _fully_connected(self, x, out_dim):
        """FullyConnected layer for final output."""
        x = tf.reshape(x, [self.hps.batch_size, -1])
        w = tf.get_variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())

        return tf.nn.xw_plus_b(x, w, b)
Project: TF-Speech-Recognition    Author: ZhishengWang    | Project source | File source
def _fully_connected_v2(self, x, name, out_dim):
        """FullyConnected layer for final output."""
        #x = tf.reshape(x, [self.hps.batch_size, -1])
        with tf.variable_scope(name):
            w = tf.get_variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
            b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())

        return tf.nn.xw_plus_b(x, w, b)
Project: InsuranceQA    Author: l11x0m7    | Project source | File source
def add_embeddings(self):
        with tf.variable_scope('embedding'):
            embeddings = tf.get_variable('embeddings', shape=[self.config.vocab_size, self.config.embedding_size], initializer=tf.uniform_unit_scaling_initializer())
            q_embed = tf.nn.embedding_lookup(embeddings, self.q)
            aplus_embed = tf.nn.embedding_lookup(embeddings, self.aplus)
            aminus_embed = tf.nn.embedding_lookup(embeddings, self.aminus)
            return q_embed, aplus_embed, aminus_embed

    # Hidden Layer
Project: InsuranceQA    Author: l11x0m7    | Project source | File source
def add_hl(self, q_embed, aplus_embed, aminus_embed):
        with tf.variable_scope('HL'):
            W = tf.get_variable('weights', shape=[self.config.embedding_size, self.config.hidden_size], initializer=tf.uniform_unit_scaling_initializer())
            b = tf.get_variable('biases', initializer=tf.constant(0.1, shape=[self.config.hidden_size]))
            h_q = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(q_embed, [-1, self.config.embedding_size]), W) + b),
                             [self.config.batch_size, self.config.sequence_length, -1])
            h_ap = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aplus_embed, [-1, self.config.embedding_size]), W) + b),
                              [self.config.batch_size, self.config.sequence_length, -1])
            h_am = tf.reshape(tf.nn.tanh(tf.matmul(tf.reshape(aminus_embed, [-1, self.config.embedding_size]), W) + b),
                              [self.config.batch_size, self.config.sequence_length, -1])
            tf.add_to_collection('total_loss', 0.5*self.config.l2_reg_lambda*tf.nn.l2_loss(W))
            # print 'h_q[shape]:', tf.shape(h_q)
            # print 'h_ap[shape]:', tf.shape(h_ap)
            # print 'h_am[shape]:', tf.shape(h_am)
            return h_q, h_ap, h_am

    # CNN layer
Project: ray    Author: ray-project    | Project source | File source
def _fully_connected(self, x, out_dim):
        """FullyConnected layer for final output."""
        x = tf.reshape(x, [self.hps.batch_size, -1])
        w = tf.get_variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)
Project: view-finding-network    Author: yiling-chen    | Project source | File source
def score(feature_vec):
    W = tf.get_variable("W", shape=[feature_vec.get_shape()[1],1], initializer=tf.uniform_unit_scaling_initializer()) # init_weight([int(feature_vec.get_shape()[1]),1])
    return tf.matmul(feature_vec,W)
Project: DSSA    Author: jzbjyb    | Project source | File source
def check_params(self):
        if self.interaction not in DSSA.VALID_INTERACTION:
            raise ValueError('interaction not valid, it should be one of {}'
                             .format(', '.join(map(lambda x: '"' + x + '"', DSSA.VALID_INTERACTION))))
        if self.cell_type not in DSSA.VALID_CELL_TYPE:
            raise ValueError('cell_type not valid, it should be one of {}'
                             .format(', '.join(map(lambda x: '"' + x + '"', DSSA.VALID_CELL_TYPE))))
        if not isinstance(self.doc_emb, np.ndarray) or not isinstance(self.query_emb, np.ndarray):
            raise ValueError('both doc_emb and query_emb should by instance of numpy.ndarray')
        self.doc_emb_actual_size = self.n_rel_feat * self.most_n_subquery + self.n_doc_emb
        if self.doc_emb.shape[1] != self.doc_emb_actual_size:
            raise ValueError('doc_emb shape[1] is unexpected. {} is desired while we got {}'
                             .format(self.doc_emb_actual_size, self.doc_emb.shape[1]))
        self.query_emb_actual_size = self.n_query_emb + 1
        if self.query_emb.shape[1] != self.query_emb_actual_size:
            raise ValueError('query_emb shape[1] is unexpected. {} is desired while we got {}'
                             .format(self.query_emb_actual_size, self.query_emb.shape[1]))
        if self.optimization not in DSSA.VALID_OPTIMIZATION:
            raise ValueError('optimization not valid, it should be one of {}'
                             .format(', '.join(map(lambda x: '"' + x + '"', DSSA.VALID_OPTIMIZATION))))
        self.input_dim = 1 + self.most_n_subquery
        self.expand_input_dim = \
            self.n_rel_feat * self.most_n_subquery + self.n_doc_emb + (self.n_query_emb + 1) * self.most_n_subquery
        if self.reuse_model and not hasattr(self, 'session_'): # read model from file
            self.graph_ = tf.Graph()
            with self.graph_.as_default():
                tf.set_random_seed(self.random_seed)
                with vs.variable_scope('DSSA', initializer=tf.uniform_unit_scaling_initializer(
                        seed=self.random_seed)) as scope:
                    self.build_graph()
                    scope.reuse_variables()
                    self.build_graph_test()
            self.session_ = tf.Session(graph=self.graph_)
            print('load model from "{}"'.format(self.reuse_model))
            self.saver.restore(self.session_, self.reuse_model)
Project: TF-resnet    Author: chenxi116    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.batch_size, -1])
    w = tf.get_variable(
        'DW', [self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: TF-resnet    Author: chenxi116    | Project source | File source
def _fully_convolutional(self, x, out_dim):
    """FullyConvolutional layer for final output."""
    w = tf.get_variable(
        'DW', [1, 1, self.filters[-1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())  
    return tf.nn.conv2d(x, w, self._stride_arr(1), padding='SAME') + b
Project: ternarynet    Author: czhu95    | Project source | File source
def FullyConnected(x, out_dim,
                   W_init=None, b_init=None,
                   nl=tf.nn.relu, use_bias=True):
    """
    Fully-Connected layer.

    :param x: a tensor to be flattened except for the first dimension.
    :param out_dim: output dimension
    :param W_init: initializer for W. Defaults to `uniform_unit_scaling_initializer(factor=1.43)`.
    :param b_init: initializer for b. Defaults to the zero initializer.
    :param nl: nonlinearity. Defaults to `relu`.
    :param use_bias: whether to use bias. A boolean, defaults to True.
    :returns: a 2D tensor
    """
    x = batch_flatten(x)
    in_dim = x.get_shape().as_list()[1]

    if W_init is None:
        #W_init = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim)))
        W_init = tf.uniform_unit_scaling_initializer(factor=1.43)
    if b_init is None:
        b_init = tf.constant_initializer()

    W = tf.get_variable('W', [in_dim, out_dim], initializer=W_init)
    if use_bias:
        b = tf.get_variable('b', [out_dim], initializer=b_init)
    prod = tf.nn.xw_plus_b(x, W, b) if use_bias else tf.matmul(x, W)
    return nl(prod, name='output')
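A usage sketch for the excerpt above (it assumes the module's batch_flatten helper; the original tensorpack layer-registration decorator is omitted in this excerpt):

img = tf.placeholder(tf.float32, [None, 8, 8, 32])
with tf.variable_scope('fc1'):
    out = FullyConnected(img, 256)  # flattens to [None, 2048], then relu(xW + b)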
Project: dawn-bench-models    Author: stanford-futuredata    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: tensorflow-input-pipelines    Author: ischlag    | Project source | File source
def _fully_connected(self, x, out_dim):
    return slim.layers.fully_connected(x, out_dim,
                                       activation_fn=None,
                                       #weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       #weights_initializer=tf.random_normal_initializer(stddev=0.01)
                                       #weights_initializer=tf.contrib.layers.variance_scaling_initializer()
                                       )
Project: tensorflow-input-pipelines    Author: ischlag    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: tensorflow-input-pipelines    Author: ischlag    | Project source | File source
def _fully_connected(self, x, out_dim):
    return slim.layers.fully_connected(x, out_dim,
                                       activation_fn=None,
                                       #weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       #weights_initializer=tf.random_normal_initializer(stddev=0.01)
                                       #weights_initializer=tf.contrib.layers.variance_scaling_initializer()
                                       )
Project: tensorflow-input-pipelines    Author: ischlag    | Project source | File source
def _fully_connected(self, x, out_dim):
    return slim.layers.fully_connected(x, out_dim,
                                       activation_fn=None,
                                       #weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       weights_initializer=tf.uniform_unit_scaling_initializer(factor=1.0)
                                       #weights_initializer=tf.random_normal_initializer(stddev=0.01)
                                       #weights_initializer=tf.contrib.layers.variance_scaling_initializer()
                                       )
Project: tf-tutorial    Author: zchen0211    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: cmcl    Author: chhwang    | Project source | File source
def fully_connected(name, l, out_dim):
    """Fully connected layer.

    Args:
      name: Scope name of this function
      l : Output of previous layer
      out_dim: Dimension of each output feature
    """
    with tf.variable_scope(name):
        l = tf.reshape(l, [l.get_shape().as_list()[0], -1])
        weights = tf.get_variable('weights', [l.get_shape()[1], out_dim], tf.float32,
                                  initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        biases = tf.get_variable('biases', [out_dim], tf.float32, initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(l, weights, biases)
Project: tanda    Author: HazyResearch    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: deeplearning-benchmark    Author: awslabs    | Project source | File source
def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x = tf.reshape(x, [self.hps.batch_size, -1])
    w = tf.get_variable(
        'DW', [x.get_shape()[1], out_dim],
        initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    b = tf.get_variable('biases', [out_dim],
                        initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(x, w, b)
Project: fold    Author: tensorflow    | Project source | File source
def __init__(self, num_units_out, activation=tf.nn.relu, initializer=None,
               input_keep_prob=None, output_keep_prob=None,
               normalization_fn=None, weight_norm=False, name=None):
    """Initializes the layer.

    Args:
      num_units_out: The number of output units in the layer.
      activation: The activation function. Default is ReLU. Use `None` to get a
        linear layer.
      initializer: The initializer for the weights. Defaults to uniform unit
        scaling with factor derived in <http://arxiv.org/pdf/1412.6558v3.pdf>
        if activation is ReLU, ReLU6, tanh, or linear. Otherwise defaults to
        truncated normal initialization with a standard deviation of 0.01.
      input_keep_prob: Optional scalar float32 tensor for dropout on input.
        Feed 1.0 at serving to disable dropout.
      output_keep_prob: Optional scalar float32 tensor for dropout on output.
        Feed 1.0 at serving to disable dropout.
      normalization_fn: Optional normalization function that will be inserted
        before nonlinearity.
      weight_norm: A bool to control whether weight normalization is used. See
        https://arxiv.org/abs/1602.07868 for how it works.
      name: An optional string name. Defaults to `FC_%d % num_units_out`. Used
        to name the variable scope where the variables for the layer live.
    """
    self.set_constructor_args('td.FC', *get_local_arguments(FC.__init__, True))

    if not initializer:
      # TODO(SamEisenstat): This constant is calibrated for ReLU, something else
      # might be better for ReLU6.
      if activation in [tf.nn.relu, tf.nn.relu6]:
        initializer = tf.uniform_unit_scaling_initializer(1.43)
      elif activation == tf.tanh:
        initializer = tf.uniform_unit_scaling_initializer(1.15)
      elif not activation:
        initializer = tf.uniform_unit_scaling_initializer(1.0)
      else:
        initializer = tf.truncated_normal_initializer(stddev=0.01)
    self._activation = activation
    self._initializer = initializer
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._normalization_fn = normalization_fn
    self._weight_norm = weight_norm
    if name is None: name = 'FC_%d' % num_units_out
    super(FC, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
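For reference, factor f here scales a uniform range of half-width f * sqrt(3 / fan_in), which is why linear layers use 1.0 and ReLU layers use the larger 1.43. A rough stand-alone equivalent of the default behavior, as a sketch rather than the library internals:

import math
import tensorflow as tf

def uniform_unit_scaling(factor=1.0):
    def _init(shape, dtype=tf.float32, partition_info=None):
        fan_in = float(shape[0])  # TF uses the product of all but the last dim
        limit = factor * math.sqrt(3.0 / fan_in)
        return tf.random_uniform(shape, -limit, limit, dtype=dtype)
    return _init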
Project: fold    Author: tensorflow    | Project source | File source
def __init__(self, num_buckets, num_units_out, initializer=None, name=None,
               trainable=True, mod_inputs=True):
    """Initializes the layer.

    Args:
      num_buckets: How many buckets the embedding has.
      num_units_out: The number of output units in the layer.
      initializer: the initializer for the weights. Defaults to uniform unit
        scaling. The initializer can also be a Tensor or numpy array, in which
        case the weights are initialized to this value and shape. Note that in
        this case the weights will still be trainable unless you also pass
        `trainable=False`.
      name: An optional string name. Defaults to
        `Embedding_%d_%d % (num_buckets, num_units_out)`. Used to name the
        variable scope where the variables for the layer live.
      trainable: Whether or not to make the weights trainable.
      mod_inputs: Whether or not to mod the input by the number of buckets.

    Raises:
      ValueError: If the shape of `weights` is not
        `(num_buckets, num_units_out)`.
    """

    self.set_constructor_args('td.Embedding',
                              *get_local_arguments(Embedding.__init__, True))

    self._weights_shape = (num_buckets, num_units_out)
    if name is None: name = 'Embedding_%d_%d' % self._weights_shape
    if initializer is None:
      initializer = tf.uniform_unit_scaling_initializer(1.0)
    elif isinstance(initializer, np.ndarray):
      initializer = tf.convert_to_tensor(initializer)
    if isinstance(initializer, tf.Tensor):
      initializer.set_shape(self._weights_shape)
      self._weights_shape = None  # otherwise get_variable barfs
    self._initializer = initializer
    self._num_buckets = num_buckets
    self._num_units_out = num_units_out
    self._trainable = trainable
    self._mod_inputs = bool(mod_inputs)
    super(Embedding, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
Project: magenta    Author: tensorflow    | Project source | File source
def conv1d(x,
           num_filters,
           filter_length,
           name,
           dilation=1,
           causal=True,
           kernel_initializer=tf.uniform_unit_scaling_initializer(1.0),
           biases_initializer=tf.constant_initializer(0.0)):
  """Fast 1D convolution that supports causal padding and dilation.

  Args:
    x: The [mb, time, channels] float tensor that we convolve.
    num_filters: The number of filter maps in the convolution.
    filter_length: The integer length of the filter.
    name: The name of the scope for the variables.
    dilation: The amount of dilation.
    causal: Whether or not this is a causal convolution.
    kernel_initializer: The kernel initialization function.
    biases_initializer: The biases initialization function.

  Returns:
    y: The output of the 1D convolution.
  """
  batch_size, length, num_input_channels = x.get_shape().as_list()
  assert length % dilation == 0

  kernel_shape = [1, filter_length, num_input_channels, num_filters]
  strides = [1, 1, 1, 1]
  biases_shape = [num_filters]
  padding = 'VALID' if causal else 'SAME'

  with tf.variable_scope(name):
    weights = tf.get_variable(
        'W', shape=kernel_shape, initializer=kernel_initializer)
    biases = tf.get_variable(
        'biases', shape=biases_shape, initializer=biases_initializer)

  x_ttb = time_to_batch(x, dilation)
  if filter_length > 1 and causal:
    x_ttb = tf.pad(x_ttb, [[0, 0], [filter_length - 1, 0], [0, 0]])

  x_ttb_shape = x_ttb.get_shape().as_list()
  x_4d = tf.reshape(x_ttb, [x_ttb_shape[0], 1,
                            x_ttb_shape[1], num_input_channels])
  y = tf.nn.conv2d(x_4d, weights, strides, padding=padding)
  y = tf.nn.bias_add(y, biases)
  y_shape = y.get_shape().as_list()
  y = tf.reshape(y, [y_shape[0], y_shape[2], num_filters])
  y = batch_to_time(y, dilation)
  y.set_shape([batch_size, length, num_filters])
  return y
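A usage sketch with illustrative shapes (time_to_batch and batch_to_time are helpers from the same magenta module):

x = tf.random_normal([4, 1024, 64])  # [batch, time, channels]; time % dilation == 0
y = conv1d(x, num_filters=128, filter_length=3, name='causal_conv', dilation=2)
# y: [4, 1024, 128]; causal padding makes y[:, t] depend only on x[:, :t + 1]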
Project: the-wavenet-pianist    Author: 821760408-sp    | Project source | File source
def _enc_upsampling_conv(encoding,
                             audio_length,
                             filter_length=1024,
                             time_stride=512):
        """Upsample local conditioning encoding to match time dim. of audio  
        :param encoding: [mb, timeframe, channels] Local conditionining encoding
        :param audio_length: Length of time dimension of audio 
        :param filter_length: transpose conv. filter length
        :param time_stride: stride along time dimension (upsamp. factor)
        :return: upsampled local conditioning encoding
        """
        with tf.variable_scope('upsampling_conv'):
            batch_size, _, enc_channels = encoding.get_shape().as_list()
            shape = tf.shape(encoding)
            strides = [1, 1, time_stride, 1]
            output_length = (shape[1] - 1) * time_stride + filter_length
            output_shape = tf.stack(
                [batch_size, 1, output_length, enc_channels])

            kernel_shape = [1, filter_length, enc_channels, enc_channels]
            biases_shape = [enc_channels]

            upsamp_weights = tf.get_variable(
                'weights',
                kernel_shape,
                initializer=tf.uniform_unit_scaling_initializer(1.0))
            upsamp_biases = tf.get_variable(
                'biases',
                biases_shape,
                initializer=tf.constant_initializer(0.0))

            encoding = tf.reshape(encoding,
                                  [batch_size, 1, shape[1], enc_channels])
            upsamp_conv = tf.nn.conv2d_transpose(
                encoding,
                upsamp_weights, output_shape, strides, padding='VALID')
            output = tf.nn.bias_add(upsamp_conv, upsamp_biases)

            output = tf.reshape(output,
                                [batch_size, output_length, enc_channels])
            output_sliced = tf.slice(
                output, [0, 0, 0],
                tf.stack([-1, audio_length, -1]))
            output_sliced.set_shape([batch_size, audio_length, enc_channels])
            return output_sliced
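A usage sketch with illustrative shapes (the method is called as a plain function here; in the project it appears to live on a class):

enc = tf.random_normal([2, 32, 16])                 # [mb, timeframes, channels]
up = _enc_upsampling_conv(enc, audio_length=16000)  # -> [2, 16000, 16]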

    # especially for global conditioning, since it doesn't align with the audio
    # input on the time dimension and its value needs broadcasting to the input;
    # for local conditioning, the sizes have already been matched