Python tensorflow module: identity() code examples

We extracted the following 48 code examples from open-source Python projects to demonstrate how to use tensorflow.identity().
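
A minimal sketch of the two idioms that recur in the snippets below (the tensor names here are ours; TF 1.x graph mode assumed): tf.identity as a way to attach a stable name to an existing tensor, and tf.identity inside tf.control_dependencies to force side-effect ops to run before a value is read.

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None], name='x')

# Idiom 1: tf.identity passes x through a new op, which is the standard
# way to give an existing tensor a readable name.
y = tf.identity(x, name='output')

# Idiom 2: control dependencies only attach to ops created inside the
# block; tf.identity provides such an op, so fetching `y_counted` also
# runs `increment`.
counter = tf.Variable(0, trainable=False)
increment = tf.assign_add(counter, 1)
with tf.control_dependencies([increment]):
    y_counted = tf.identity(y)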

Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def value_transition(self, curr_state, next_symbols, batch_size):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        with tf.name_scope('grammar_transition'):
            adjusted_next_symbols = tf.where(next_symbols >= self.num_control_tokens, next_symbols + (first_value_token - self.num_control_tokens), next_symbols)

            assert1 = tf.Assert(tf.reduce_all(tf.logical_and(next_symbols < num_value_tokens, next_symbols >= 0)), [curr_state, next_symbols])
            with tf.control_dependencies([assert1]):
                transitions = tf.gather(tf.constant(self.transition_matrix), curr_state)
            assert transitions.get_shape()[1:] == (self.output_size,)

            indices = tf.stack((tf.range(0, batch_size), adjusted_next_symbols), axis=1)
            next_state = tf.gather_nd(transitions, indices)

            assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [curr_state, adjusted_next_symbols, next_state])
            with tf.control_dependencies([assert2]):
                return tf.identity(next_state)
Project: seq2seq | Author: google
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
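
A usage sketch for the accumulator above (the placeholder and feed values are ours): each run of update_op appends a batch of strings, while value_tensor reads the current contents through tf.identity.

values = tf.placeholder(tf.string, shape=[None])
value_tensor, update_op = accumulate_strings(values)
# sess.run(update_op, {values: ['a', 'b']}) appends to the variable;
# sess.run(value_tensor) then returns the accumulated strings.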
Project: HandDetection | Author: YunqiuXu
def batch_norm_layer(self, to_be_normalized, is_training):
    if is_training:
      train_phase = tf.constant(1)
    else:
      train_phase = tf.constant(-1)
    beta = tf.Variable(tf.constant(0.0, shape=[to_be_normalized.shape[-1]]), name='beta', trainable=True)
    gamma = tf.Variable(tf.constant(1.0, shape=[to_be_normalized.shape[-1]]), name='gamma', trainable=True)
    # axes = np.arange(len(to_be_normalized.shape) - 1)  # replaced with fixed axes for TensorFlow 1.3
    axes = [0, 1, 2]

    print("start nn.moments")
    print("axes : " + str(axes))
    batch_mean, batch_var = tf.nn.moments(to_be_normalized, axes, name='moments')
    print("nn.moments successful")
    ema = tf.train.ExponentialMovingAverage(decay=0.5)

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(train_phase > 0, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))  # if training, update the moving averages
    normed = tf.nn.batch_normalization(to_be_normalized, mean, var, beta, gamma, 1e-3)
    return normed
Project: US-image-prediction | Author: ChengruiWu008
def input_norm(xs):
    fc_mean, fc_var = tf.nn.moments(
        xs,
        axes=[0],
    )
    scale = tf.Variable(tf.ones([1]))
    shift = tf.Variable(tf.zeros([1]))
    epsilon = 0.001
    # apply moving average for mean and var when train on batch
    ema = tf.train.ExponentialMovingAverage(decay=0.5)

    def mean_var_with_update():
        ema_apply_op = ema.apply([fc_mean, fc_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(fc_mean), tf.identity(fc_var)

    mean, var = mean_var_with_update()
    xs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)
    return xs
Project: US-image-prediction | Author: ChengruiWu008
def batch_norm(Wx_plus_b,out_size):
    fc_mean, fc_var = tf.nn.moments(
        Wx_plus_b,
        axes=[0],  # the dimensions you want to normalize; here [0] for the batch axis
        # for images, use [0, 1, 2] to cover [batch, height, width] but not channel
    )
    scale = tf.Variable(tf.ones([out_size]))
    shift = tf.Variable(tf.zeros([out_size]))
    epsilon = 0.001
    # apply moving average for mean and var when train on batch
    ema = tf.train.ExponentialMovingAverage(decay=0.5)
    def mean_var_with_update():
        ema_apply_op = ema.apply([fc_mean, fc_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(fc_mean), tf.identity(fc_var)
    mean, var = mean_var_with_update()
    Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
    return Wx_plus_b
Project: deligan | Author: val-iisc
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                self.beta = tf.get_variable("beta", [shape[-1]],
                                    initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                    initializer=tf.random_normal_initializer(1., 0.02))

                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
                x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed

# standard convolution layer
Project: tfutils | Author: neuroailab
def global_pool(inp, kind='avg', keep_dims=False, name=None):
    if kind not in ['max', 'avg']:
        raise ValueError('Only global avg or max pool is allowed, but '
                         'you requested {}.'.format(kind))
    if name is None:
        name = 'global_{}_pool'.format(kind)
    h, w = inp.get_shape().as_list()[1:3]
    out = getattr(tf.nn, kind + '_pool')(inp,
                                    ksize=[1,h,w,1],
                                    strides=[1,1,1,1],
                                    padding='VALID')
    if keep_dims:
        output = tf.identity(out, name=name)
    else:
        output = tf.reshape(out, [out.get_shape().as_list()[0], -1], name=name)

    return output
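
A usage sketch under assumed shapes (the input tensor is ours): global average pooling collapses the two spatial dimensions, and keep_dims controls whether they survive as size-1 axes.

inp = tf.random_uniform([8, 7, 7, 64])                # NHWC feature maps
flat = global_pool(inp, kind='avg')                   # shape [8, 64]
kept = global_pool(inp, kind='avg', keep_dims=True)   # shape [8, 1, 1, 64]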
Project: DmsMsgRcg | Author: bshao001
def convert(model_dir, keras_model_file, tf_model_file, name_output='s1_output', num_output=1):
    # False is the right flag for tf.keras in TF 1.4; standalone Keras expects 0 instead
    keras.backend.set_learning_phase(False)
    keras_model = keras.models.load_model(os.path.join(model_dir, keras_model_file),
                                          custom_objects={'custom_loss': YoloNet.custom_loss})

    output = [None] * num_output
    out_node_names = [None] * num_output
    for i in range(num_output):
        out_node_names[i] = name_output + str(i)
        output[i] = tf.identity(keras_model.outputs[i], name=out_node_names[i])

    sess = keras.backend.get_session()
    constant_graph = tf.graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),
        out_node_names  # All other operations relying on this will also be saved
    )
    output_file = os.path.join(model_dir, tf_model_file)
    with tf.gfile.GFile(output_file, "wb") as f:
        f.write(constant_graph.SerializeToString())

    print("Converted model was saved as {}.".format(tf_model_file))
Project: tfplus | Author: renmengye
def build_input(self):
        self.init_default_options()
        inp_height = self.get_option('inp_height')
        inp_width = self.get_option('inp_width')
        inp_depth = self.get_option('inp_depth')
        x = self.add_input_var(
            'x', [None, inp_height, inp_width, inp_depth], 'float')
        x_id = tf.identity(x)
        self.register_var('x_id', x_id)
        y_gt = self.add_input_var('y_gt', [None, 10], 'float')
        phase_train = self.add_input_var('phase_train', None, 'bool')
        return {
            'x': x,
            'y_gt': y_gt,
            'phase_train': phase_train
        }
Project: GAN | Author: ilblackdragon
def autoencoder_model(feature, target, mode, params):
  """Autoencodes sequence model."""
  vocab_size = params.get('vocab_size')
  embed_dim = params.get('embed_dim')

  tf.identity(feature[0], name='feature')
  embed_feature = sequence.embed_features(
    feature, vocab_size=vocab_size, embed_dim=embed_dim)
  output, _ = sequence.sequence_autoencoder_discriminator(
    embed_feature, length=FLAGS.max_doc_length, hidden_size=embed_dim)
  logits, predictions = sequence.outbed_generated(output)

  # Loss and training.
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=feature, logits=logits)
  loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))
  train_op = layers.optimize_loss(
      loss, tf.train.get_global_step(),
      learning_rate=params['learning_rate'],
      optimizer=params.get('optimizer', 'Adam'))
  return predictions, loss, train_op
Project: GAN | Author: ilblackdragon
def autoencoder_model(feature, target, mode, params):
  """Autoencodes features with given function."""
  autoencoder_fn = params.get('autoencoder_fn')
  feature_processor = params.get('feature_processor', lambda f: f)
  generated_postprocess = params.get('generated_postprocess', lambda f: f)

  # Process features.
  feature = feature_processor(feature)

  # Auto-encode.
  generated, _ = autoencoder_fn(feature)

  # Loss and training.
  loss = tf.contrib.losses.mean_squared_error(feature, generated)
  train_op = layers.optimize_loss(
      loss, tf.train.get_global_step(),
      learning_rate=params['learning_rate'],
      optimizer=params.get('optimizer', 'Adam'))

  # Post process generated.
  prediction = generated_postprocess(generated)
  prediction = tf.identity(prediction, name='generated')
  return prediction, loss, train_op
Project: Automatic-Image-Colorization | Author: Armour
def normalize(self, x, train=True):
        """
        Returns a batch-normalized version of x.
        """
        if train:
            mean, variance = tf.nn.moments(x, [0, 1, 2])
            assign_mean = self.mean.assign(mean)
            assign_variance = self.variance.assign(variance)
            with tf.control_dependencies([assign_mean, assign_variance]):
                return tf.nn.batch_norm_with_global_normalization(x, mean, variance, self.beta, self.gamma, self.epsilon, self.scale_after_norm)
        else:
            mean = self.ewma_trainer.average(self.mean)
            variance = self.ewma_trainer.average(self.variance)
            local_beta = tf.identity(self.beta)
            local_gamma = tf.identity(self.gamma)
            return tf.nn.batch_norm_with_global_normalization(x, mean, variance, local_beta, local_gamma, self.epsilon, self.scale_after_norm)
Project: acdc_segmenter | Author: baumgach
def dense_layer_bn(bottom,
                   name,
                   training,
                   hidden_units=512,
                   activation=tf.nn.relu,
                   weight_init='he_normal'):

    '''
    Shortcut for a batch-normalised dense layer
    '''

    linact = dense_layer(bottom=bottom,
                         name=name,
                         hidden_units=hidden_units,
                         activation=tf.identity,
                         weight_init=weight_init,
                         add_bias=False)

    batchnorm = batch_normalisation_layer(linact, name + '_bn', training=training)
    act = activation(batchnorm)

    return act

### VARIABLE INITIALISERS ####################################################################################
Project: RL_NFSP | Author: Richard-An
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP | Author: Richard-An
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP | Author: Richard-An
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP | Author: Richard-An
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: RL_NFSP | Author: Richard-An
def batch_norm(self, X):
        train_phase = self.train_phase
        with tf.name_scope('bn'):
            n_out = X.get_shape()[-1:]
            beta = tf.Variable(tf.constant(0.0, shape=n_out), name='beta', trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=n_out), name='gamma', trainable=True)
            # batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            batch_mean, batch_var = tf.nn.moments(X, [0, 1, 2], name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=0.5)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(train_phase, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            normed = tf.nn.batch_normalization(X, mean, var, beta, gamma, 1e-3)
        return normed
Project: DNC | Author: bgavran
def update_link_matrix(self, link_matrix_old, precedence_weighting_old, write_weighting):
        """
        Updating the link matrix takes some effort (in order to vectorize the implementation).
        Instead of the original index-by-index operation, it's all done at once.

        :param link_matrix_old: from previous time step, shape [batch_size, memory_size, memory_size]
        :param precedence_weighting_old: from previous time step, shape [batch_size, memory_size]
        :param write_weighting: from current time step, shape [batch_size, memory_size]
        :return: updated link matrix
        """
        expanded = tf.expand_dims(write_weighting, axis=2)

        # vectorizing the paper's original implementation
        w = tf.tile(expanded, [1, 1, self.memory_size])  # shape [batch_size, memory_size, memory_size]
        # shape of w_transpose is the same: [batch_size, memory_size, memory_size]
        w_transp = tf.tile(tf.transpose(expanded, [0, 2, 1]), [1, self.memory_size, 1])

        # in einsum, m and n index the same dimension; TensorFlow's einsum doesn't support repeated subscripts
        lm = (1 - w - w_transp) * link_matrix_old + tf.einsum("bn,bm->bmn", precedence_weighting_old, write_weighting)
        lm *= (1 - tf.eye(self.memory_size, batch_shape=[self.batch_size]))  # making sure self links are off
        return tf.identity(lm, name="Link_matrix")
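
For reference, the per-element update this vectorizes (the link matrix recurrence from the DNC paper, with w the current write weighting and p the previous precedence weighting) is:

L_t[i,j] = (1 - w_t[i] - w_t[j]) \, L_{t-1}[i,j] + w_t[i] \, p_{t-1}[j],
\qquad L_t[i,i] = 0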
Project: WassersteinGAN.tensorflow | Author: shekkizh
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5, stddev=0.02):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, stddev),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: cloudml-samples | Author: GoogleCloudPlatform
def build_prediction_graph(self):
    """Builds prediction graph and registers appropriate endpoints."""

    tensors = self.build_graph(None, 1, GraphMod.PREDICT)

    keys_placeholder = tf.placeholder(tf.string, shape=[None])
    inputs = {
        'key': keys_placeholder,
        'image_bytes': tensors.input_jpeg
    }

    # To extract the id, we need to add the identity function.
    keys = tf.identity(keys_placeholder)
    outputs = {
        'key': keys,
        'prediction': tensors.predictions[0],
        'scores': tensors.predictions[1]
    }

    return inputs, outputs
Project: Magic-Pixel | Author: zhwhong
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        if train:
            with tf.variable_scope(self.name) as scope:
                self.beta = tf.get_variable("beta", [shape[-1]],
                                    initializer=tf.constant_initializer(0.))
                self.gamma = tf.get_variable("gamma", [shape[-1]],
                                    initializer=tf.random_normal_initializer(1., 0.02))

                batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
                ema_apply_op = self.ema.apply([batch_mean, batch_var])
                self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)

                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
        else:
            mean, var = self.ema_mean, self.ema_var

        normed = tf.nn.batch_norm_with_global_normalization(
                x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)

        return normed
Project: rl_algorithms | Author: DanielTakeshi
def gauss_KL(mu1, logstd1, mu2, logstd2):
    """ Returns KL divergence among two multivariate Gaussians, component-wise.

    It assumes the covariance matrix is diagonal. All inputs have shape (n,a).
    It is not necessary to know the number of actions because reduce_sum will
    sum over this to get the `d` constant offset. The part consisting of the
    trace in the formula is blended with the mean difference squared due to the
    common "denominator" of var2_na.  This forumula generalizes for an arbitrary
    number of actions.  I think mu2 and logstd2 should represent the policy
    before the update.

    Returns the KL divergence for each of the n components in the minibatch,
    then we do a reduce_mean outside this.
    """
    var1_na = tf.exp(2.*logstd1)
    var2_na = tf.exp(2.*logstd2)
    tmp_matrix = 2.*(logstd2 - logstd1) + (var1_na + tf.square(mu1-mu2))/var2_na - 1
    kl_n = tf.reduce_sum(0.5 * tmp_matrix, axis=[1]) # Don't forget the 1/2 !!
    assert_op = tf.Assert(tf.reduce_all(kl_n >= -0.0000001), [kl_n]) 
    with tf.control_dependencies([assert_op]):
        kl_n = tf.identity(kl_n)
    return kl_n
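
Written out, the closed form this computes per minibatch row (diagonal Gaussians, summed over the action dimension a) is:

\mathrm{KL}\big(\mathcal{N}(\mu_1, \sigma_1^2) \,\|\, \mathcal{N}(\mu_2, \sigma_2^2)\big)
  = \sum_a \left[ \log\frac{\sigma_2}{\sigma_1}
  + \frac{\sigma_1^2 + (\mu_1 - \mu_2)^2}{2\sigma_2^2} - \frac{1}{2} \right]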
Project: zhusuan | Author: thu-ml
def test_session_run(self):
        with self.test_session(use_gpu=True) as sess:
            samples = tf.constant([1, 2, 3])
            log_probs = Mock()
            probs = Mock()
            sample_func = Mock(return_value=samples)
            log_prob_func = Mock(return_value=log_probs)
            prob_func = Mock(return_value=probs)
            distribution = Mock(sample=sample_func,
                                log_prob=log_prob_func,
                                prob=prob_func,
                                dtype=tf.int32)

            # test session.run
            t = StochasticTensor('t', distribution, 1, samples)
            self.assertAllEqual(sess.run(t), np.asarray([1, 2, 3]))

            # test using as feed dict
            self.assertAllEqual(
                sess.run(tf.identity(t), feed_dict={
                    t: np.asarray([4, 5, 6])
                }),
                np.asarray([4, 5, 6])
            )
Project: zhusuan | Author: thu-ml
def tune(self, acceptance_rate, fresh_start):
        def adapt_stepsize():
            new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
            rate1 = tf.div(1.0, new_step + self.t0)
            new_h_bar = tf.assign(
                self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
                rate1 * (self.delta - acceptance_rate))
            log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
            rate = tf.pow(new_step, -self.kappa)
            new_log_epsilon_bar = tf.assign(
                self.log_epsilon_bar,
                rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
                self.log_epsilon_bar)
            with tf.control_dependencies([new_log_epsilon_bar]):
                new_log_epsilon = tf.identity(log_epsilon)

            return tf.exp(new_log_epsilon)

        c = tf.cond(self.adapt_step_size,
                    adapt_stepsize,
                    lambda: tf.exp(self.log_epsilon_bar))

        return c
Project: zhusuan | Author: thu-ml
def assert_rank_at_least(tensor, k, name):
    """
    Whether the rank of `tensor` is at least k.

    :param tensor: A tensor to be checked.
    :param k: The least rank allowed.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = '{} should have rank >= {}.'.format(name, k)
    if static_shape and (static_shape.ndims < k):
        raise ValueError(shape_err_msg)
    if not static_shape:
        _assert_shape_op = tf.assert_rank_at_least(
            tensor, k, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
    return tensor
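
A usage sketch (the tensors are ours): a statically-known shape is checked at graph construction time, so the failing case raises immediately rather than at run time.

matrix = tf.placeholder(tf.float32, shape=[None, 3])
checked = assert_rank_at_least(matrix, 2, name='matrix')   # passes statically
# assert_rank_at_least(tf.constant(1.0), 1, 'scalar')      # would raise ValueError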
Project: zhusuan | Author: thu-ml
def assert_scalar(tensor, name):
    """
    Whether the `tensor` is a scalar (0-D tensor).

    :param tensor: A tensor to be checked.
    :param name: The name of `tensor` for error message.
    :return: The checked tensor.
    """
    static_shape = tensor.get_shape()
    shape_err_msg = name + " should be a scalar (0-D tensor)."
    if static_shape and (static_shape.ndims >= 1):
        raise ValueError(shape_err_msg)
    else:
        _assert_shape_op = tf.assert_rank(tensor, 0, message=shape_err_msg)
        with tf.control_dependencies([_assert_shape_op]):
            tensor = tf.identity(tensor)
        return tensor
Project: deep-learning-nd | Author: RyanCCollins
def test_get_init_cell(get_init_cell):
    with tf.Graph().as_default():
        test_batch_size_ph = tf.placeholder(tf.int32)
        test_rnn_size = 256

        cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)

        # Check type
        assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
            'Cell is wrong type.  Found {} type'.format(type(cell))

        # Check for name attribute
        assert hasattr(init_state, 'name'),\
            'Initial state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'

        # Check name
        assert init_state.name == 'initial_state:0',\
            'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)

    _print_success_message()
Project: deep-learning-nd | Author: RyanCCollins
def test_build_rnn(build_rnn):
    with tf.Graph().as_default():
        test_rnn_size = 256
        test_rnn_layer_size = 2
        test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size)

        test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
        outputs, final_state = build_rnn(test_cell, test_inputs)

        # Check name
        assert hasattr(final_state, 'name'),\
            'Final state doesn\'t have the "name" attribute.  Try using `tf.identity` to set the name.'
        assert final_state.name == 'final_state:0',\
            'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)

        # Check shape
        assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
            'Outputs has wrong shape.  Found shape {}'.format(outputs.get_shape())
        assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
            'Final state wrong shape.  Found shape {}'.format(final_state.get_shape())

    _print_success_message()
Project: AutoPortraitMatting | Author: PetroWu
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: AutoPortraitMatting | Author: PetroWu
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
Project: tensorflow-yolo | Author: hjimce
def build_forward(self):
        verbalise = self.FLAGS.verbalise

        # Placeholders
        inp_size = [None] + self.meta['inp_size']
        self.inp = tf.placeholder(tf.float32, inp_size, 'input')
        self.feed = dict() # other placeholders

        # Build the forward pass
        state = identity(self.inp)  # project-local identity wrapper (returns a layer-like state), not tf.identity
        roof = self.num_layer - self.ntrain
        self.say(HEADER, LINE)
        for i, layer in enumerate(self.darknet.layers):
            scope = '{}-{}'.format(str(i),layer.type)
            args = [layer, state, i, roof, self.feed]
            state = op_create(*args)
            mess = state.verbalise()
            self.say(mess)
        self.say(LINE)

        self.top = state
        self.out = tf.identity(state.out, name='output')
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def create_q_network(self,state_dim,action_dim):
        # the layer size could be changed
        layer1_size = LAYER1_SIZE
        layer2_size = LAYER2_SIZE

        state_input = tf.placeholder("float",[None,state_dim])
        action_input = tf.placeholder("float",[None,action_dim])

        W1 = self.variable([state_dim,layer1_size],state_dim)
        b1 = self.variable([layer1_size],state_dim)
        W2 = self.variable([layer1_size,layer2_size],layer1_size+action_dim)
        W2_action = self.variable([action_dim,layer2_size],layer1_size+action_dim)
        b2 = self.variable([layer2_size],layer1_size+action_dim)
        W3 = tf.Variable(tf.random_uniform([layer2_size,1],-3e-3,3e-3))
        b3 = tf.Variable(tf.random_uniform([1],-3e-3,3e-3))

        layer1 = tf.nn.relu(tf.matmul(state_input,W1) + b1)
        layer2 = tf.nn.relu(tf.matmul(layer1,W2) + tf.matmul(action_input,W2_action) + b2)
        q_value_output = tf.identity(tf.matmul(layer2,W3) + b3)

        return state_input,action_input,q_value_output,[W1,b1,W2,W2_action,b2,W3,b3]
Project: -NIPS-2017-Learning-to-Run | Author: kyleliang919
def create_q_network(self,state_dim,action_dim,scope):
    with tf.variable_scope(scope):
        # the layer size could be changed
            layer1_size = LAYER1_SIZE
            layer2_size = LAYER2_SIZE

            state_input = tf.placeholder("float",[None,state_dim])
            action_input = tf.placeholder("float",[None,action_dim])

            W1 = self.variable([state_dim,layer1_size],state_dim)
            b1 = self.variable([layer1_size],state_dim)
            W2 = self.variable([layer1_size,layer2_size],layer1_size+action_dim)
            W2_action = self.variable([action_dim,layer2_size],layer1_size+action_dim)
            b2 = self.variable([layer2_size],layer1_size+action_dim)
            W3 = tf.Variable(tf.random_uniform([layer2_size,1],-3e-3,3e-3))
            b3 = tf.Variable(tf.random_uniform([1],-3e-3,3e-3))

            layer1 = tf.nn.relu(tf.matmul(state_input,W1) + b1)
            layer2 = tf.nn.relu(tf.matmul(layer1,W2) + tf.matmul(action_input,W2_action) + b2)
            q_value_output = tf.identity(tf.matmul(layer2,W3) + b3)

            return state_input,action_input,q_value_output,[W1,b1,W2,W2_action,b2,W3,b3]
Project: chi | Author: rmst
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: chi | Author: rmst
def deep_q_network():
    """ Architecture according to:
    http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html
    """
    @tt.model(tracker=tf.train.ExponentialMovingAverage(1 - .0005),  # TODO: replace with original weight freeze
              optimizer=tf.train.RMSPropOptimizer(.00025, .95, .95, .01))
    def q_network(x):
        x /= 255
        x = layers.conv2d(x, 32, 8, 4)
        x = layers.conv2d(x, 64, 4, 2)
        x = layers.conv2d(x, 64, 3, 1)
        x = layers.flatten(x)
        x = layers.fully_connected(x, 512)
        x = layers.fully_connected(x, env.action_space.n, activation_fn=None)
        x = tf.identity(x, name='Q')
        return x

    return q_network
Project: sonnet | Author: deepmind
def testGetSaverPartitioned(self, save_partitioned, load_partitioned):
    path = os.path.join(tempfile.mkdtemp(), "ckpt")

    # Save checkpoint.
    with self.test_session() as sess:
      conv = self._create_conv(partitioned=save_partitioned, name="a")
      saver = snt.get_saver(conv)
      sess.run(tf.global_variables_initializer())
      saver.save(sess, path)
      w = tf.identity(conv.w)
      w_value = sess.run(w)

    # Restore checkpoint.
    with self.test_session() as sess:
      conv = self._create_conv(partitioned=load_partitioned, name="b")
      saver = snt.get_saver(conv)
      saver.restore(sess, path)
      w = tf.identity(conv.w)
      self.assertAllEqual(sess.run(w), w_value)
Project: sonnet | Author: deepmind
def testTupleSelect(self):
    """Test where idx is a tuple."""
    shape0 = [1, 2]
    shape1 = [1, 2, 3]
    shape2 = [1, 2, 3, 4]
    input0 = tf.random_uniform(shape=shape0)
    input1 = tf.random_uniform(shape=shape1)
    input2 = tf.random_uniform(shape=shape2)

    mod = snt.SelectInput(idx=(0, 2))
    output = mod(input0, input1, input2)
    output0 = tf.identity(input0)
    output2 = tf.identity(input2)

    with self.test_session() as sess:
      out = sess.run([output, [output0, output2]])
      self.assertAllEqual(out[0][0], out[1][0])
      self.assertAllEqual(out[0][1], out[1][1])
Project: sonnet | Author: deepmind
def testNestedListSelect(self):
    """Test where idx is a nested list."""
    shape0 = [1, 2]
    shape1 = [1, 2, 3]
    shape2 = [1, 2, 3, 4]
    input0 = tf.random_uniform(shape=shape0)
    input1 = tf.random_uniform(shape=shape1)
    input2 = tf.random_uniform(shape=shape2)

    mod = snt.SelectInput(idx=[2, [1, 0, 1]])
    output = mod(input0, input1, input2)
    output0 = tf.identity(input0)
    output1 = tf.identity(input1)
    output2 = tf.identity(input2)

    with self.test_session() as sess:
      out = sess.run([output, [output2, [output1, output0, output1]]])
      self.assertAllEqual(out[0][0], out[1][0])
      self.assertAllEqual(out[0][1][0], out[1][1][0])
      self.assertAllEqual(out[0][1][1], out[1][1][1])
      self.assertAllEqual(out[0][1][2], out[1][1][2])
Project: conv_seq2seq | Author: tobyyouup
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def get_function_init_state(self, function_tokens):
        next_state = tf.gather(self.function_states, function_tokens - (self.num_begin_tokens + self.num_control_tokens))
        assert2 = tf.Assert(tf.reduce_all(next_state >= 0), [function_tokens])
        with tf.control_dependencies([assert2]):
            return tf.identity(next_state)
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def add_loss_op(self, result):
        logits = result.rnn_output
        with tf.control_dependencies([tf.assert_positive(tf.shape(logits)[1], data=[tf.shape(logits)])]):
            length_diff = tf.reshape(self.config.max_length - tf.shape(logits)[1], shape=(1,))
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0]], axis=0), shape=(3, 2))
        preds = tf.pad(logits, padding, mode='constant')

        # add epsilon to avoid division by 0
        preds = preds + 1e-5

        mask = tf.sequence_mask(self.output_length_placeholder, self.config.max_length, dtype=tf.float32)
        loss = tf.contrib.seq2seq.sequence_loss(preds, self.output_placeholder, mask)

        with tf.control_dependencies([tf.assert_non_negative(loss, data=[preds, mask], summarize=256*60*300)]):
            return tf.identity(loss)
Project: AVSR-Deep-Speech | Author: pandeydivesh15
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)  # project-defined gather_nd helper (3-arg signature), not tf.gather_nd

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
Project: AVSR-Deep-Speech | Author: pandeydivesh15
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)  # project-defined gather_nd helper (3-arg signature), not tf.gather_nd

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
Project: youtube-8m | Author: wangheda
def __call__(self, x, train=True):
        shape = x.get_shape().as_list()

        with tf.variable_scope(self.name) as scope:
            self.beta = tf.get_variable("beta", shape[1:],
                                        initializer=tf.constant_initializer(0.))
            self.gamma = tf.get_variable("gamma", shape[1:],
                                         initializer=tf.random_normal_initializer(1.,0.02))
            self.mean = tf.get_variable("mean", shape[1:],
                                        initializer=tf.constant_initializer(0.),trainable=False)
            self.variance = tf.get_variable("variance",shape[1:],
                                            initializer=tf.constant_initializer(1.),trainable=False)
            if train:
                batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')

                self.mean.assign(batch_mean)
                self.variance.assign(batch_var)
                ema_apply_op = self.ema.apply([self.mean, self.variance])
                with tf.control_dependencies([ema_apply_op]):
                    mean, var = tf.identity(batch_mean), tf.identity(batch_var)
            else:
                mean, var = self.ema.average(self.mean), self.ema.average(self.variance)

            normed = tf.nn.batch_normalization(x, mean, var, self.beta, self.gamma, self.epsilon)

        return normed
Project: MMD-Variational-Autoencoder | Author: ShengjiaZhao
def conv2d_lrelu(inputs, num_outputs, kernel_size, stride):
    conv = tf.contrib.layers.convolution2d(inputs, num_outputs, kernel_size, stride,
                                           weights_initializer=tf.contrib.layers.xavier_initializer(),
                                           activation_fn=tf.identity)
    conv = lrelu(conv)
    return conv
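
The lrelu applied here is defined elsewhere in the project; a typical definition, as an assumption, is:

def lrelu(x, leak=0.1):
    # leaky ReLU: identity for positive inputs, a small slope for negatives
    return tf.maximum(x, leak * x)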
Project: MMD-Variational-Autoencoder | Author: ShengjiaZhao
def conv2d_t_relu(inputs, num_outputs, kernel_size, stride):
    conv = tf.contrib.layers.convolution2d_transpose(inputs, num_outputs, kernel_size, stride,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(),
                                                     activation_fn=tf.identity)
    conv = tf.nn.relu(conv)
    return conv
Project: MMD-Variational-Autoencoder | Author: ShengjiaZhao
def fc_lrelu(inputs, num_outputs):
    fc = tf.contrib.layers.fully_connected(inputs, num_outputs,
                                           weights_initializer=tf.contrib.layers.xavier_initializer(),
                                           activation_fn=tf.identity)
    fc = lrelu(fc)
    return fc