Python tensorflow module: constant_initializer() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.constant_initializer().
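
Before the project examples, a minimal self-contained sketch (TensorFlow 1.x API; this snippet is illustrative and not taken from any of the projects below) shows what tf.constant_initializer does: it fills a variable with a fixed scalar or array value the first time the variable is initialized.

import numpy as np
import tensorflow as tf

# Minimal sketch, assuming TensorFlow 1.x (tf.get_variable / tf.Session).
bias_values = np.array([0.1, 0.1, 0.1, 0.1], dtype=np.float32)
with tf.variable_scope("demo"):
    b = tf.get_variable("b", shape=[4],
                        initializer=tf.constant_initializer(bias_values))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # -> [0.1 0.1 0.1 0.1]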

Project: youtube-8m    Author: wangheda
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
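
A possible way to drive the assignment op created above at run time (a hypothetical sketch: the collection names come from the snippet, while sess and the call to get_video_weights_array are assumed):

# Hypothetical usage: refresh the non-trainable sample-weight variable.
weights_input = tf.get_collection("weights_input")[0]
weights_assignment = tf.get_collection("weights_assignment")[0]
new_weights, _ = get_video_weights_array()
sess.run(weights_assignment, feed_dict={weights_input: new_weights})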
Project: human-rl    Author: gsastry
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b
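
The bound sqrt(6 / (fan_in + fan_out)) computed above is the Glorot (Xavier) uniform rule, so under that assumption the weight variable could equivalently be created with the built-in TF 1.x initializer (a sketch, not how the project itself writes it):

# Equivalent under the Glorot-uniform assumption: TF computes the same
# fan_in = h * w * in_channels and fan_out = h * w * num_filters from the shape.
w = tf.get_variable("W", filter_shape, dtype,
                    initializer=tf.glorot_uniform_initializer(),
                    collections=collections)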
Project: distributional_perspective_on_RL    Author: Kiwoo
def batchnorm(x, name, phase, updates, gamma=0.96):
    k = x.get_shape()[1]
    runningmean = tf.get_variable(name+"/mean", shape=[1, k], initializer=tf.constant_initializer(0.0), trainable=False)
    runningvar = tf.get_variable(name+"/var", shape=[1, k], initializer=tf.constant_initializer(1e-4), trainable=False)
    testy = (x - runningmean) / tf.sqrt(runningvar)

    mean_ = mean(x, axis=0, keepdims=True)
    var_ = mean(tf.square(x), axis=0, keepdims=True)
    std = tf.sqrt(var_)
    trainy = (x - mean_) / std

    updates.extend([
        tf.assign(runningmean, runningmean * gamma + mean_ * (1 - gamma)),
        tf.assign(runningvar, runningvar * gamma + var_ * (1 - gamma))
    ])

    y = switch(phase, trainy, testy)

    out = y * tf.get_variable(name+"/scaling", shape=[1, k], initializer=tf.constant_initializer(1.0), trainable=True)\
            + tf.get_variable(name+"/translation", shape=[1,k], initializer=tf.constant_initializer(0.0), trainable=True)
    return out

# ================================================================
# Mathematical utils
# ================================================================
Project: seq2seq    Author: google
def test_without_residuals(self):
    inputs = tf.constant(np.random.randn(1, 2))
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      standard_cell = tf.contrib.rnn.MultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)], state_is_tuple=True)
      res_standard = standard_cell(inputs, state, scope="standard")

      test_cell = rnn_cell.ExtendedMultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)])
      res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      res_standard_, res_test_, = sess.run([res_standard, res_test])

    # Make sure it produces the same results as the standard cell
    self.assertAllClose(res_standard_[0], res_test_[0])
    self.assertAllClose(res_standard_[1][0], res_test_[1][0])
    self.assertAllClose(res_standard_[1][1], res_test_[1][1])
Project: seq2seq    Author: google
def _test_with_residuals(self, inputs, **kwargs):
    """Runs the cell in a session"""
    inputs = tf.convert_to_tensor(inputs)
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      test_cell = rnn_cell.ExtendedMultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)],
          residual_connections=True,
          **kwargs)
      res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      return sess.run(res_test)
Project: aapm_thoracic_challenge    Author: xf4j
def conv3d(input_, output_dim, f_size, is_training, scope='conv3d'):
    with tf.variable_scope(scope) as scope:
        # VGG network uses two 3*3 conv layers to effectively increase receptive field
        w1 = tf.get_variable('w1', [f_size, f_size, f_size, input_.get_shape()[-1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1 = tf.nn.conv3d(input_, w1, strides=[1, 1, 1, 1, 1], padding='SAME')
        b1 = tf.get_variable('b1', [output_dim], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.bias_add(conv1, b1)
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_training, scope='bn1',
                                           variables_collections=['bn_collections'])
        r1 = tf.nn.relu(bn1)

        w2 = tf.get_variable('w2', [f_size, f_size, f_size, output_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2 = tf.nn.conv3d(r1, w2, strides=[1, 1, 1, 1, 1], padding='SAME')
        b2 = tf.get_variable('b2', [output_dim], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.bias_add(conv2, b2)
        bn2 = tf.contrib.layers.batch_norm(conv2, is_training=is_training, scope='bn2',
                                           variables_collections=['bn_collections'])
        r2 = tf.nn.relu(bn2)
        return r2
Project: CausalGAN    Author: mkocaoglu
def deconv2d(input_, output_shape,
       k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
       name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                  initializer=tf.random_normal_initializer(stddev=stddev))

        tf_output_shape=tf.stack(output_shape)
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=tf_output_shape,
                strides=[1, d_h, d_w, 1])

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        #deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), tf_output_shape)

        if with_w:
            return deconv, w, biases
        else:
            return deconv
Project: CausalGAN    Author: mkocaoglu
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()

    #mat_shape=tf.stack([tf.shape(input_)[1],output_size])
    mat_shape=[shape[1],output_size]

    with tf.variable_scope(scope or "Linear"):
        #matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
        matrix = tf.get_variable("Matrix", mat_shape, tf.float32,
                     tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                   initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias


#minibatch method that improves on openai
#because it doesn't fix batchsize:
#TODO: recheck when not sleepy
Project: kaggle-review    Author: daxiongshu
def _get_weight_variable(self, layer_name, name, shape, L2=1):
        wname = '%s/%s:0'%(layer_name,name)
        fanin, fanout = shape[-2:]
        for dim in shape[:-2]:
            fanin *= float(dim)
            fanout *= float(dim)

        sigma = self._xavi_norm(fanin, fanout)
        if self.weights is None or wname not in self.weights:
            w1 = tf.get_variable(name,initializer=tf.truncated_normal(shape = shape,
                mean=0,stddev = sigma))
            print('{:>23} {:>23}'.format(wname, 'randomly initialize'))
        else:
            w1 = tf.get_variable(name, shape = shape,
                initializer=tf.constant_initializer(value=self.weights[wname],dtype=tf.float32))
            self.loaded_weights[wname]=1
        if wname != w1.name:
            print(wname,w1.name)
            assert False
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(w1)*L2)
        return w1
Project: deligan    Author: val-iisc
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                self.beta = tf.get_variable("beta", [shape[-1]],
                                    initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                    initializer=tf.random_normal_initializer(1., 0.02))

                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
                x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed

# standard convolution layer
Project: deligan    Author: val-iisc
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
    num_inputs=df_dim*4
    theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
    W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
    x = input
    x=tf.reshape(x, [batchsize,num_inputs])
    activation = tf.matmul(x, W)
    activation = tf.reshape(activation,[-1,num_kernels,dim_per_kernel])
    abs_dif = tf.mul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
                                                1-tf.expand_dims(tf.constant(np.eye(batchsize),dtype=np.float32),1))
    f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
    print(f.get_shape())
    print(input.get_shape())
    return tf.concat(1,[x, f])
Project: TAC-GAN    Author: dashayushman
def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        #w = tf.get_variable('w', [k_h, k_h, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev))
        w = tf.get_variable('w', [k_h, k_h, output_shape[-1], input_.get_shape()[-1]], initializer = tf.contrib.layers.xavier_initializer())
        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])

        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

        if with_w:
            return deconv, w, biases
        else:
            return deconv
Project: attend_infer_repeat    Author: akosiorek
def make_moving_average(name, value, init, decay, log=True):
    """Creates an exp-moving average of `value` and an update op, which is added to UPDATE_OPS collection.

    :param name: string, name of the created moving average tf.Variable
    :param value: tf.Tensor, the value to be averaged
    :param init: float, an initial value for the moving average
    :param decay: float between 0 and 1, exponential decay of the moving average
    :param log: bool, add a summary op if True
    :return: tf.Tensor, the moving average
    """
    var = tf.get_variable(name, shape=value.get_shape(),
                          initializer=tf.constant_initializer(init), trainable=False)

    update = moving_averages.assign_moving_average(var, value, decay, zero_debias=False)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update)
    if log:
        tf.summary.scalar(name, var)

    return var
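
A hypothetical usage sketch for the helper above (loss, optimizer and the 0.99 decay are assumptions, not from the original project):

loss_avg = make_moving_average("loss/moving_avg", loss, init=0.0, decay=0.99)
# The update op was added to UPDATE_OPS, so run it alongside the training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)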
Project: how_to_convert_text_to_images    Author: llSourcell
def __call__(self, input_layer, output_shape,
                 k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
                 name="deconv2d"):
        output_shape[0] = input_layer.shape[0]
        ts_output_shape = tf.pack(output_shape)
        with tf.variable_scope(name):
            # filter : [height, width, output_channels, in_channels]
            w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
                              init=tf.random_normal_initializer(stddev=stddev))

            try:
                deconv = tf.nn.conv2d_transpose(input_layer, w,
                                                output_shape=ts_output_shape,
                                                strides=[1, d_h, d_w, 1])

            # Support for versions of TensorFlow before 0.7.0
            except AttributeError:
                deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
                                        strides=[1, d_h, d_w, 1])

            # biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
            # deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
            deconv = tf.reshape(deconv, [-1] + output_shape[1:])

            return deconv
Project: how_to_convert_text_to_images    Author: llSourcell
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
        shape = input_layer.shape
        input_ = input_layer.tensor
        try:
            if len(shape) == 4:
                input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
                input_.set_shape([None, np.prod(shape[1:])])
                shape = input_.get_shape().as_list()

            with tf.variable_scope(scope or "Linear"):
                matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                       init=tf.random_normal_initializer(stddev=stddev))
                bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
                return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
        except Exception:
            import ipdb; ipdb.set_trace()
Project: TFCommon    Author: MU94W
def __call__(self, x, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            # Check if the input size exist.
            input_size = x.get_shape().with_rank(2)[1]
            if input_size is None:
                raise ValueError("Expecting input_size to be set.")

            maxout_Wo = tf.get_variable(name='Wo', shape=(input_size, 2*self._units),
                                        initializer=gaussian_initializer(mean=0.0, std=0.01))
            maxout_b  = tf.get_variable(name='b',  shape=(2*self._units,),
                                        initializer=tf.constant_initializer(0.0))

            # 1st. Compute on all the 2 channels and reshape.
            t = tf.matmul(x, maxout_Wo) + maxout_b
            t = tf.reshape(t, shape=(-1, self._units, 2))

            # 2nd. Do maxout op, now has shape: (None, self._units)
            maxout_t = tf.reduce_max(t, axis=-1)

            return maxout_t
Project: Unsupervised-Anomaly-Detection-with-Generative-Adversarial-Networks    Author: xtarx
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))

        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])

        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

        if with_w:
            return deconv, w, biases
        else:
            return deconv
Project: Unsupervised-Anomaly-Detection-with-Generative-Adversarial-Networks    Author: xtarx
def minibatch_discrimination(input_layer, num_kernels, dim_per_kernel=5, name='minibatch_discrim'):
    # batch_size = input_layer.shape[0]
    # num_features = input_layer.shape[1]
    batch_size = input_layer.get_shape().as_list()[0]
    num_features = input_layer.get_shape().as_list()[1]
    W = tf.get_variable('W', [num_features, num_kernels * dim_per_kernel],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable('b', [num_kernels], initializer=tf.constant_initializer(0.0))
    activation = tf.matmul(input_layer, W)
    activation = tf.reshape(activation, [batch_size, num_kernels, dim_per_kernel])
    tmp1 = tf.expand_dims(activation, 3)
    tmp2 = tf.transpose(activation, perm=[1, 2, 0])
    tmp2 = tf.expand_dims(tmp2, 0)
    abs_diff = tf.reduce_sum(tf.abs(tmp1 - tmp2), reduction_indices=[2])
    f = tf.reduce_sum(tf.exp(-abs_diff), reduction_indices=[2])
    f = f + b
    return f
Project: photo-editing-tensorflow    Author: JamesChuanggg
def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))

        try:
            deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])

        # Support for versions of TensorFlow before 0.7.0
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                                strides=[1, d_h, d_w, 1])

        biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

        if with_w:
            return deconv, w, biases
        else:
            return deconv
Project: photo-editing-tensorflow    Author: JamesChuanggg
def __init__(self, sess, pred_network, env, dataset, conf):
    self.sess = sess
    self.pred_network = pred_network
    self.env = env
    self.dataset = dataset

    self.ckpt_dir = conf.ckpt_dir
    self.ckpt_path = conf.ckpt_path
    self.max_iter = conf.max_iter
    self.max_to_keep = conf.max_to_keep
    self.batch_size = conf.batch_size
    self.df_dim = 64

    self.learning_rate = conf.learning_rate
    self.learning_rate_minimum = conf.learning_rate_minimum
    self.learning_rate_decay = conf.learning_rate_decay
    self.learning_rate_decay_step = conf.learning_rate_decay_step

    self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

    self.d_bn1 = batch_norm(name='d_bn1')
    self.d_bn2 = batch_norm(name='d_bn2')
    self.d_bn3 = batch_norm(name='d_bn3')
    self.build_opt()
Project: Fast-Slow-LSTM    Author: amujika
def layer_norm_all(h, base, num_units, scope):
    # Layer Norm (faster version)
    #
    # Performs layer norm on multiple base at once (ie, i, g, j, o for lstm)
    #
    # Reshapes h in to perform layer norm in parallel
    with tf.variable_scope(scope):
        h_reshape = tf.reshape(h, [-1, base, num_units])
        mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
        var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
        epsilon = tf.constant(1e-3)
        rstd = tf.rsqrt(var + epsilon)
        h_reshape = (h_reshape - mean) * rstd
        # reshape back to original
        h = tf.reshape(h_reshape, [-1, base * num_units])

        alpha = tf.get_variable('layer_norm_alpha', [4 * num_units],
                                initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        bias = tf.get_variable('layer_norm_bias', [4 * num_units],
                               initializer=tf.constant_initializer(0.0), dtype=tf.float32)

    return (h * alpha) + bias
Project: rlflow    Author: tpbarron
def build_network(name_scope, env):
    w_init_dense = tf.truncated_normal_initializer() #contrib.layers.xavier_initializer()
    b_init = tf.constant_initializer(value=0.0)

    with tf.variable_scope(name_scope):
        input_tensor = tf.placeholder(tf.float32,
                                      shape=tf_utils.get_input_tensor_shape(env),
                                      name='policy_input_'+name_scope)
        net = tf.contrib.layers.fully_connected(input_tensor,
                                                32, #env.action_space.n, #32,
                                                activation_fn=tf.nn.tanh, #sigmoid,
                                                weights_initializer=w_init_dense,
                                                biases_initializer=b_init,
                                                scope='dense1_'+name_scope)
        net = tf.contrib.layers.fully_connected(net,
                                                env.action_space.n,
                                                weights_initializer=w_init_dense,
                                                biases_initializer=b_init,
                                                scope='dense2_'+name_scope)
        net = tf.contrib.layers.softmax(net)

    return [input_tensor], [net]
Project: cnn_lstm_ctc_ocr    Author: weinman
def rnn_layers(features, sequence_length, num_classes):
    """Build a stack of RNN layers from input features"""

    # Input features is [batchSize paddedSeqLen numFeatures]
    logit_activation = tf.nn.relu
    weight_initializer = tf.contrib.layers.variance_scaling_initializer()
    bias_initializer = tf.constant_initializer(value=0.0)

    with tf.variable_scope("rnn"):
        # Transpose to time-major order for efficiency
        rnn_sequence = tf.transpose(features, perm=[1, 0, 2], name='time_major')
        rnn1 = rnn_layer(rnn_sequence, sequence_length, rnn_size, 'bdrnn1')
        rnn2 = rnn_layer(rnn1, sequence_length, rnn_size, 'bdrnn2')
        rnn_logits = tf.layers.dense( rnn2, num_classes+1, 
                                      activation=logit_activation,
                                      kernel_initializer=weight_initializer,
                                      bias_initializer=bias_initializer,
                                      name='logits')
        return rnn_logits
Project: general-deep-image-completion    Author: adamstseng
def new_fc_layer( self, bottom, output_size, name ):
        shape = bottom.get_shape().as_list()
        dim = np.prod( shape[1:] )
        x = tf.reshape( bottom, [-1, dim])
        input_size = dim

        with tf.variable_scope(name):
            w = tf.get_variable(
                    "W",
                    shape=[input_size, output_size],
                    initializer=tf.random_normal_initializer(0., 0.005))
            b = tf.get_variable(
                    "b",
                    shape=[output_size],
                    initializer=tf.constant_initializer(0.))
            fc = tf.nn.bias_add( tf.matmul(x, w), b)

        return fc
Project: docnade    Author: AYLIEN
def linear(input, output_dim, scope=None, stddev=None):
    if stddev:
        norm = tf.random_normal_initializer(stddev=stddev)
    else:
        norm = tf.random_normal_initializer(
            stddev=np.sqrt(2.0 / input.get_shape()[1].value)
        )
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable(
            'w',
            [input.get_shape()[1], output_dim],
            initializer=norm
        )
        b = tf.get_variable('b', [output_dim], initializer=const)
    return tf.matmul(input, w) + b
Project: tf_base    Author: ozansener
def fc(self, input, num_out, name, relu=True):
        with tf.variable_scope(name) as scope:
            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input, [-1, dim])
            else:
                feed_in, dim = (input, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out], init_func=tf.truncated_normal_initializer(stddev = 0.1))
            biases = self.make_var('biases', [num_out], init_func=tf.constant_initializer(0.1))
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
Project: brats17    Author: xf4j
def conv3d(input_, output_dim, f_size, is_training, scope='conv3d'):
    with tf.variable_scope(scope) as scope:
        # VGG network uses two 3*3 conv layers to effectively increase receptive field
        w1 = tf.get_variable('w1', [f_size, f_size, f_size, input_.get_shape()[-1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1 = tf.nn.conv3d(input_, w1, strides=[1, 1, 1, 1, 1], padding='SAME')
        b1 = tf.get_variable('b1', [output_dim], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.bias_add(conv1, b1)
        bn1 = tf.contrib.layers.batch_norm(conv1, is_training=is_training, scope='bn1', decay=0.9,
                                           zero_debias_moving_mean=True, variables_collections=['bn_collections'])
        r1 = tf.nn.relu(bn1)

        w2 = tf.get_variable('w2', [f_size, f_size, f_size, output_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2 = tf.nn.conv3d(r1, w2, strides=[1, 1, 1, 1, 1], padding='SAME')
        b2 = tf.get_variable('b2', [output_dim], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.bias_add(conv2, b2)
        bn2 = tf.contrib.layers.batch_norm(conv2, is_training=is_training, scope='bn2', decay=0.9,
                                           zero_debias_moving_mean=True, variables_collections=['bn_collections'])
        r2 = tf.nn.relu(bn2)
        return r2
Project: fold    Author: tensorflow
def _create_variables(self):
    if self.input_type.dtype != 'float32':
      raise TypeError('FC input dtype must be float32: %s' %
                      self.input_type.dtype)
    if self.input_type.ndim != 1:
      raise TypeError('FC input shape must be 1D: %s' %
                      str(self.input_type.shape))
    self._bias = tf.get_variable(
        'bias', self.output_type.shape, initializer=tf.constant_initializer(0))
    self._weights = tf.get_variable(
        'weights', [self.input_type.shape[0], self.output_type.shape[0]],
        initializer=self._initializer)
    if self._weight_norm:
      self._scales = tf.get_variable(
          'scales',
          [self.output_type.shape[0]],
          initializer=tf.constant_initializer(1.0))
Project: WassersteinGAN.tensorflow    Author: shekkizh
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
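
A hypothetical usage sketch (conv_output and the ops being run are assumptions): phase_train must be a boolean tensor so the tf.cond above can switch between batch statistics during training and the moving averages at test time.

phase_train = tf.placeholder(tf.bool, name="phase_train")
h = batch_norm(conv_output, n_out=64, phase_train=phase_train)
# sess.run(train_step,  feed_dict={..., phase_train: True})   # training
# sess.run(predictions, feed_dict={..., phase_train: False})  # inference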
Project: WaterGAN    Author: kskin
def deconv2d(input_, output_shape,
       k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
       name="deconv2d", with_w=False):
  with tf.variable_scope(name):
    # filter : [height, width, output_channels, in_channels]
    w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
              initializer=tf.random_normal_initializer(stddev=stddev))

    try:
      deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                strides=[1, d_h, d_w, 1])

    # Support for versions of TensorFlow before 0.7.0
    except AttributeError:
      deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
                strides=[1, d_h, d_w, 1])

    biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
    deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())

    if with_w:
      return deconv, w, biases
    else:
      return deconv
Project: squeezenet    Author: mtreml
def get_deconv_filter(f_shape):
    width = f_shape[0]
    heigh = f_shape[0]
    f = ceil(width/2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    bilinear = np.zeros([f_shape[0], f_shape[1]])
    for x in range(width):
        for y in range(heigh):
            value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
            bilinear[x, y] = value
    weights = np.zeros(f_shape)
    for i in range(f_shape[2]):
        weights[:, :, i, i] = bilinear

    init = tf.constant_initializer(value=weights,
                                   dtype=tf.float32)
    return tf.get_variable(name="up_filter", initializer=init,
                           shape=weights.shape)
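
For intuition, a worked example (illustration only, not from the original project): a 4x4 filter yields the classic bilinear-interpolation kernel.

# f = ceil(4 / 2.0) = 2,  c = (2*2 - 1 - 4 % 2) / (2.0 * 2) = 0.75
# 1-D profile (1 - |x/2 - 0.75|) for x = 0..3  ->  [0.25, 0.75, 0.75, 0.25]
# 2-D kernel = outer product of that profile with itself:
#   [[0.0625, 0.1875, 0.1875, 0.0625],
#    [0.1875, 0.5625, 0.5625, 0.1875],
#    [0.1875, 0.5625, 0.5625, 0.1875],
#    [0.0625, 0.1875, 0.1875, 0.0625]]
# The same kernel is copied onto every (i, i) input/output channel pair.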
Project: IntroToDeepLearning    Author: robb-brown
def biasVariable(shape,bias=0.1,name=None):
    # create a set of bias nodes initialized with a constant 0.1
    name = 'biases' if name is None else name
    return tf.get_variable(name,shape,initializer=tf.constant_initializer(bias))
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32, initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)

    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:,0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)

    predict_word = minibatch[:,1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss
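
A hypothetical training sketch (the optimizer, the session and the pairs array of (center word, context word) ids are assumptions):

vectors, minibatch, loss = make_skipgram_softmax_loss(embeddings_matrix,
                                                      vocabulary_size, vector_size)
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss_value = sess.run([train_op, loss], feed_dict={minibatch: pairs})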
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def encode(self, inputs, _input_length, _parses):
        with tf.variable_scope('BagOfWordsEncoder'):
            W = tf.get_variable('W', (self.embed_size, self.output_size))
            b = tf.get_variable('b', shape=(self.output_size,), initializer=tf.constant_initializer(0, tf.float32))

            enc_hidden_states = tf.tanh(tf.tensordot(inputs, W, [[2], [0]]) + b)
            enc_final_state = tf.reduce_sum(enc_hidden_states, axis=1)

            #assert enc_hidden_states.get_shape()[1:] == (self.config.max_length, self.config.hidden_size)
            if self._cell_type == 'lstm':
                enc_final_state = (tf.contrib.rnn.LSTMStateTuple(enc_final_state, enc_final_state),)

            enc_output = tf.nn.dropout(enc_hidden_states, keep_prob=self._dropout, seed=12345)

            return enc_output, enc_final_state
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def add_input_op(self, xavier):
        with tf.variable_scope('embed'):
            # first the embed the input
            if self.config.train_input_embeddings:
                if self.config.input_embedding_matrix:
                    initializer = tf.constant_initializer(self.config.input_embedding_matrix)
                else:
                    initializer = xavier
                input_embed_matrix = tf.get_variable('input_embedding',
                                                     shape=(self.config.dictionary_size, self.config.embed_size),
                                                     initializer=initializer)
            else:
                input_embed_matrix = tf.constant(self.config.input_embedding_matrix)

            # dictionary size x embed_size
            assert input_embed_matrix.get_shape() == (self.config.dictionary_size, self.config.embed_size)

            # now embed the output
            if self.config.train_output_embeddings:
                output_embed_matrix = tf.get_variable('output_embedding',
                                                      shape=(self.config.output_size, self.config.output_embed_size),
                                                      initializer=xavier)
            else:
                output_embed_matrix = tf.constant(self.config.output_embedding_matrix)

            assert output_embed_matrix.get_shape() == (self.config.output_size, self.config.output_embed_size)

        inputs = tf.nn.embedding_lookup([input_embed_matrix], self.input_placeholder)
        # batch size x max length x embed_size
        assert inputs.get_shape()[1:] == (self.config.max_length, self.config.embed_size)
        return inputs, output_embed_matrix
Project: facerecognition    Author: guoxiaolu
def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
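
A hypothetical way to combine this with a classification loss (the 0.01 weight and surrounding names are assumptions, not from the original project):

prelogits_center_loss, _ = center_loss(embeddings, labels, alfa=0.95,
                                       nrof_classes=nrof_classes)
total_loss = cross_entropy_loss + 0.01 * prelogits_center_loss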
Project: youtube-8m    Author: wangheda
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        with tf.variable_scope(self.name) as scope:
            self.beta = tf.get_variable("beta", shape[1:],
                                        initializer=tf.constant_initializer(0.))
            self.gamma = tf.get_variable("gamma", shape[1:],
                                         initializer=tf.random_normal_initializer(1.,0.02))
            self.mean = tf.get_variable("mean", shape[1:],
                                        initializer=tf.constant_initializer(0.),trainable=False)
            self.variance = tf.get_variable("variance",shape[1:],
                                            initializer=tf.constant_initializer(1.),trainable=False)
            if train:
                batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')

                self.mean.assign(batch_mean)
                self.variance.assign(batch_var)
                ema_apply_op = self.ema.apply([self.mean, self.variance])
                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
            else:
                mean, var = self.ema.average(self.mean), self.ema.average(self.variance)

            normed = tf.nn.batch_normalization(x, mean, var, self.beta, self.gamma, self.epsilon)

        return normed
Project: youtube-8m    Author: wangheda
def get_mask(self, max_frames, num_frames):
        mask_array = []
        for i in range(max_frames + 1):
            tmp = [0.0] * max_frames
            for j in range(i):
                tmp[j] = 1.0
            mask_array.append(tmp)
        mask_array = np.array(mask_array)
        mask_init = tf.constant_initializer(mask_array)
        mask_emb = tf.get_variable("mask_emb", shape = [max_frames + 1, max_frames],
                                   dtype = tf.float32, trainable = False, initializer = mask_init)
        mask = tf.nn.embedding_lookup(mask_emb, num_frames)
        return mask
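
An illustration of the result (not from the original project): with max_frames = 3 the rows of mask_emb are lower-triangular 0/1 patterns, so the lookup returns a mask over valid frames.

# mask_emb rows for max_frames = 3:
#   i = 0 -> [0., 0., 0.]
#   i = 1 -> [1., 0., 0.]
#   i = 2 -> [1., 1., 0.]
#   i = 3 -> [1., 1., 1.]
# tf.nn.embedding_lookup(mask_emb, [2]) -> [[1., 1., 0.]]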
Project: youtube-8m    Author: wangheda
def get_support(self, labels, support_type=None):
    if support_type == None:
      support_type = FLAGS.support_type
    if "," in support_type:
      new_labels = []
      for st in support_type.split(","):
        new_labels.append(tf.cast(self.get_support(labels, st), dtype=tf.float32))
      support_labels = tf.concat(new_labels, axis=1)
      return support_labels
    elif support_type == "vertical":
      num_classes = FLAGS.num_classes
      num_verticals = FLAGS.num_verticals
      vertical_file = FLAGS.vertical_file
      vertical_mapping = np.zeros([num_classes, num_verticals], dtype=np.float32)
      float_labels = tf.cast(labels, dtype=tf.float32)
      with open(vertical_file) as F:
        for line in F:
          group = map(int, line.strip().split())
          if len(group) == 2:
            x, y = group
            vertical_mapping[x, y] = 1
      vm_init = tf.constant_initializer(vertical_mapping)
      vm = tf.get_variable("vm", shape = [num_classes, num_verticals], 
                           trainable=False, initializer=vm_init)
      vertical_labels = tf.matmul(float_labels, vm)
      return tf.cast(vertical_labels > 0.2, tf.float32)
    elif support_type == "frequent":
      num_frequents = FLAGS.num_frequents
      frequent_labels = tf.slice(labels, begin=[0, 0], size=[-1, num_frequents])
      frequent_labels = tf.cast(frequent_labels, dtype=tf.float32)
      return frequent_labels
    elif support_type == "label":
      float_labels = tf.cast(labels, dtype=tf.float32)
      return float_labels
    else:
      raise NotImplementedError()
Project: youtube-8m    Author: wangheda
def get_mask(self, max_frames, num_frames):
    mask_array = []
    for i in xrange(max_frames + 1):
      tmp = [0.0] * max_frames 
      for j in xrange(i):
        tmp[j] = 1.0
      mask_array.append(tmp)
    mask_array = np.array(mask_array)
    mask_init = tf.constant_initializer(mask_array)
    mask_emb = tf.get_variable("mask_emb", shape = [max_frames + 1, max_frames], 
            dtype = tf.float32, trainable = False, initializer = mask_init)
    mask = tf.nn.embedding_lookup(mask_emb, num_frames)
    return mask