Python tensorflow module: assign() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.assign().

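Before the project samples, here is a minimal, self-contained sketch of the core pattern (the variable and op names are illustrative, not from any project below): tf.assign builds an op that writes a new value into an existing tf.Variable when executed, and the op itself evaluates to the variable's updated value.

import tensorflow as tf

v = tf.Variable(0.0, name="v")
increment = tf.assign(v, v + 1.0)  # op that sets v to v + 1 when run

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # 1.0: tf.assign returns the updated value
    print(sess.run(increment))  # 2.0: each run re-executes the assignment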
Project: GYM_DRL | Author: Kyushik
def assign_network_to_target():
    update_wfc1 = tf.assign(w_fc1_target, w_fc1)
    update_wfc2 = tf.assign(w_fc2_target, w_fc2)
    update_wfc3 = tf.assign(w_fc3_target, w_fc3)
    update_bfc1 = tf.assign(b_fc1_target, b_fc1)
    update_bfc2 = tf.assign(b_fc2_target, b_fc2)
    update_bfc3 = tf.assign(b_fc3_target, b_fc3)

    sess.run(update_wfc1)
    sess.run(update_wfc2)
    sess.run(update_wfc3)
    sess.run(update_bfc1)
    sess.run(update_bfc2)
    sess.run(update_bfc3)

# Input
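The six separate sess.run calls above each launch their own graph execution; the same target-network copy can be expressed as one grouped op. A sketch, assuming the same variables as the snippet above:

update_target = tf.group(
    tf.assign(w_fc1_target, w_fc1), tf.assign(w_fc2_target, w_fc2),
    tf.assign(w_fc3_target, w_fc3), tf.assign(b_fc1_target, b_fc1),
    tf.assign(b_fc2_target, b_fc2), tf.assign(b_fc3_target, b_fc3))

sess.run(update_target)  # one call runs all six assignments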
Project: GYM_DRL | Author: Kyushik
def xavier_initializer(shape):
    dim_sum = np.sum(shape)
    if len(shape) == 1:
        dim_sum += 1
    bound = np.sqrt(2.0 / dim_sum)
    return tf.random_uniform(shape, minval=-bound, maxval=bound)

# # Assigning network variables to target network variables 
# def assign_network_to_target():
#   update_wfc = tf.assign(w_fc_target, w_fc)
#   update_bfc = tf.assign(b_fc_target, b_fc)

#   sess.run(update_wfc)
#   sess.run(update_bfc)

#   cell_target = cell 

# Input
Project: youtube-8m | Author: wangheda
def get_video_weights(video_id_batch):
  video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
                          vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
  indexes = video_id_to_index.lookup(video_id_batch)
  weights, length = get_video_weights_array()
  weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
  weights_tensor = tf.get_variable("sample_weights",
                               shape=[length],
                               trainable=False,
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(weights))
  weights_assignment = tf.assign(weights_tensor, weights_input)

  tf.add_to_collection("weights_input", weights_input)
  tf.add_to_collection("weights_assignment", weights_assignment)

  video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
  return video_weight_batch
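The collections presumably let other parts of the pipeline refresh the sample weights without holding direct references to these tensors. A sketch of that consumer side (new_weights is an assumed numpy array of length `length`):

weights_input = tf.get_collection("weights_input")[0]
weights_assignment = tf.get_collection("weights_assignment")[0]
sess.run(weights_assignment, feed_dict={weights_input: new_weights})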
Project: distributional_perspective_on_RL | Author: Kiwoo
def batchnorm(x, name, phase, updates, gamma=0.96):
    k = x.get_shape()[1]
    runningmean = tf.get_variable(name+"/mean", shape=[1, k], initializer=tf.constant_initializer(0.0), trainable=False)
    runningvar = tf.get_variable(name+"/var", shape=[1, k], initializer=tf.constant_initializer(1e-4), trainable=False)
    testy = (x - runningmean) / tf.sqrt(runningvar)

    mean_ = mean(x, axis=0, keepdims=True)
    var_ = mean(tf.square(x), axis=0, keepdims=True)
    std = tf.sqrt(var_)
    trainy = (x - mean_) / std

    updates.extend([
        tf.assign(runningmean, runningmean * gamma + mean_ * (1 - gamma)),
        tf.assign(runningvar, runningvar * gamma + var_ * (1 - gamma))
    ])

    y = switch(phase, trainy, testy)

    out = y * tf.get_variable(name+"/scaling", shape=[1, k], initializer=tf.constant_initializer(1.0), trainable=True)\
            + tf.get_variable(name+"/translation", shape=[1,k], initializer=tf.constant_initializer(0.0), trainable=True)
    return out
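The running statistics above change only when the ops appended to `updates` are actually executed; the caller is expected to run them alongside its training step. A sketch, assuming a train_op and feed dict from the surrounding training loop:

sess.run([train_op] + updates, feed_dict=feed)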

# ================================================================
# Mathematical utils
# ================================================================
Project: seq2seq | Author: google
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
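A sketch of how this accumulator would be consumed (batch_of_strings is an assumed 1-d string tensor; note the variable is created with collections=[], so it is not picked up by the standard initializers and must be initialized by whatever harness runs these ops):

value, update = accumulate_strings(batch_of_strings)
sess.run(update)               # appends this batch to the accumulator
all_strings = sess.run(value)  # reads everything accumulated so far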
Project: HandDetection | Author: YunqiuXu
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
      with tf.device("/cpu:0"):
        # fix the vgg16 issue from conv weights to fc weights
        # fix RGB to BGR
        fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
        fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
        conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
        restorer_fc = tf.train.Saver({self._scope + "/fc6/weights": fc6_conv, 
                                      self._scope + "/fc7/weights": fc7_conv,
                                      self._scope + "/conv1/conv1_1/weights": conv1_rgb})
        restorer_fc.restore(sess, pretrained_model)

        sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'], tf.reshape(fc6_conv, 
                            self._variables_to_fix[self._scope + '/fc6/weights:0'].get_shape())))
        sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'], tf.reshape(fc7_conv, 
                            self._variables_to_fix[self._scope + '/fc7/weights:0'].get_shape())))
        sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'], 
                            tf.reverse(conv1_rgb, [2])))
Project: CausalGAN | Author: mkocaoglu
def build_train_op(self):
        config=self.config

        self.g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.g_loss, var_list=self.g_vars)

        self.d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.d_loss, var_list=self.d_vars)

        self.d_label_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.d_labelLossReal, var_list=self.dl_vars)

        self.d_gen_label_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.g_lossLabels_GLabeler, var_list=self.dl_gen_vars)

        self.d_on_z_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
                  .minimize(self.g_loss_on_z + self.rec_loss_coeff*self.real_reconstruction_loss, var_list=self.dz_vars)

        self.k_t_update = tf.assign(self.k_t, self.k_t*tf.exp(-1.0/config.tau) )

        self.train_op=tf.group(self.d_gen_label_optim,self.d_label_optim,self.d_optim,self.g_optim,self.d_on_z_optim)
Project: tfutils | Author: neuroailab
def apply_gradients(self, grads_and_vars, global_step=None):
        """Apply gradients to model variables specified in `grads_and_vars`.

        `apply_gradients` returns an op that calls
        `tf.train.Optimizer.apply_gradients` and then zeros the gradient
        variables stored in `self.grads_and_vars`.

        Args:
            grads_and_vars (list): Description.
            global_step (None, optional): tensorflow global_step variable.

        Returns:
            (tf.Operation): Applies gradient update to model followed by an
                internal gradient zeroing operation to `self.grads_and_vars`.

        """
        self.mini_flag = tf.assign(self.mini_flag, tf.constant([0], dtype = tf.float32))
        # grads_and_vars = self.aggregate_gradients(grads_and_vars, method='average')
        with tf.control_dependencies([self.mini_flag]):
            optimize = self._optimizer.apply_gradients(grads_and_vars,
                                                       global_step=global_step)
        #return [optimize, self.zero_grad()]
        return optimize
Project: onsager_deep_learning | Author: mborgerding
def load_trainable_vars(sess,filename):
    """load a .npz archive and assign the value of each loaded
    ndarray to the trainable variable whose name matches the
    archive key.  Any elements in the archive that do not have
    a corresponding trainable variable will be returned in a dict.
    """
    other={}
    try:
        tv=dict([ (str(v.name),v) for v in tf.trainable_variables() ])
        for k,d in np.load(filename).items():
            if k in tv:
                print('restoring ' + k)
                sess.run(tf.assign( tv[k], d) )
            else:
                other[k] = d
    except IOError:
        pass
    return other
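A sketch of the round trip this helper supports; the file name and the save side are assumptions, not part of the project code:

np.savez('state.npz',
         **{str(v.name): sess.run(v) for v in tf.trainable_variables()})
leftovers = load_trainable_vars(sess, 'state.npz')  # re-assigns matches, returns the rest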
Project: RL_NFSP | Author: Richard-An
def learn(self):
        # hard replace parameters
        if self.a_replace_counter % REPLACE_ITER_A == 0:
            self.sess.run([tf.assign(t, e) for t, e in zip(self.at_params, self.ae_params)])
        if self.c_replace_counter % REPLACE_ITER_C == 0:
            self.sess.run([tf.assign(t, e) for t, e in zip(self.ct_params, self.ce_params)])
        self.a_replace_counter += 1
        self.c_replace_counter += 1

        indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
        bt = self.memory[indices, :]
        bs = bt[:, :self.s_dim]
        ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
        br = bt[:, -self.s_dim - 1: -self.s_dim]
        bs_ = bt[:, -self.s_dim:]

        self.sess.run(self.atrain, {self.S: bs})
        self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
Project: DRLModule | Author: halleanwoo
def update_prmt_dqn(scope_main):
    q_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/q_network")
    target_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/target_network")
    sess.run([tf.assign(t, q) for t, q in zip(target_prmts, q_prmts)])
    print("updating target-network parameters...")

#
# def local2global():


# def global2local():




# ========= Error Raise =========
Project: keras | Author: GeekLiB
def batch_set_value(tuples):
    '''Sets the values of many tensor variables at once.

    # Arguments
        tuples: a list of tuples `(tensor, value)`.
            `value` should be a Numpy array.
    '''
    if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
            value = np.asarray(value)
            tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
            if hasattr(x, '_assign_placeholder'):
                assign_placeholder = x._assign_placeholder
                assign_op = x._assign_op
            else:
                assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)
                assign_op = x.assign(assign_placeholder)
                x._assign_placeholder = assign_placeholder
                x._assign_op = assign_op
            assign_ops.append(assign_op)
            feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
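Because the placeholder and assign op are cached on the variable itself, repeated calls reuse one pair of graph nodes instead of growing the graph on every call. A hypothetical call site (var_a and var_b are assumed tf.Variables):

batch_set_value([(var_a, np.ones((3, 2))),
                 (var_b, np.zeros(5))])  # both assignments run in one session call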
Project: keras | Author: GeekLiB
def __init__(self, inputs, outputs, updates=[]):
        assert type(inputs) in {list, tuple}, 'Input to a TensorFlow backend function should be a list or tuple.'
        assert type(outputs) in {list, tuple}, 'Output to a TensorFlow backend function should be a list or tuple.'
        assert type(updates) in {list, tuple}, 'Updates in a TensorFlow backend function should be a list or tuple.'
        self.inputs = list(inputs)
        self.outputs = list(outputs)
        with tf.control_dependencies(self.outputs):
            updates_ops = []
            for update in updates:
                if type(update) is tuple:
                    p, new_p = update
                    updates_ops.append(tf.assign(p, new_p))
                else:
                    # assumed already an op
                    updates_ops.append(update)
            self.updates_op = tf.group(*updates_ops)
Project: third_person_im | Author: bstadie
def set_param_values(self, flattened_params, **tags):
        debug = tags.pop("debug", False)
        param_values = unflatten_tensors(
            flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for param, dtype, value in zip(
                self.get_params(**tags),
                self.get_param_dtypes(**tags),
                param_values):
            if param not in self._cached_assign_ops:
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print("setting value of %s" % param.name)
        tf.get_default_session().run(ops, feed_dict=feed_dict)
Project: third_person_im | Author: bstadie
def predict_sym(self, xs):
        return L.get_output(self.l_out, xs)

    # def fit(self, xs, ys):
    #     if self._normalize_inputs:
    #         # recompute normalizing constants for inputs
    #         new_mean = np.mean(xs, axis=0, keepdims=True)
    #         new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
    #         tf.get_default_session().run(tf.group(
    #             tf.assign(self._x_mean_var, new_mean),
    #             tf.assign(self._x_std_var, new_std),
    #         ))
    #         inputs = [xs, ys]
    #     loss_before = self._optimizer.loss(inputs)
    #     if self._name:
    #         prefix = self._name + "_"
    #     else:
    #         prefix = ""
    #     logger.record_tabular(prefix + 'LossBefore', loss_before)
    #     self._optimizer.optimize(inputs)
    #     loss_after = self._optimizer.loss(inputs)
    #     logger.record_tabular(prefix + 'LossAfter', loss_after)
    #     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
Project: chemblnet | Author: jaak-s
def batch_norm_wrapper(inputs, is_training, decay = 0.999):
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))
    pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
    pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)

    if is_training:
        batch_mean, batch_var = tf.nn.moments(inputs, [0])
        train_mean = tf.assign(pop_mean,
                               pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var,
                              pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([train_mean, train_var]):
            # normalize with the batch statistics; the control dependency
            # ensures the running averages are updated as a side effect
            return tf.nn.batch_normalization(inputs,
                batch_mean, batch_var, beta, scale, epsilon)
    else:
        return tf.nn.batch_normalization(inputs,
            pop_mean, pop_var, beta, scale, epsilon)

## regularization parameter
Project: HyperGAN | Author: 255BITS
def _create(self):
        d_loss = gan.graph.d_loss
        g_loss = gan.graph.g_loss
        g_lr = np.float32(config.g_learn_rate)
        d_lr = np.float32(config.d_learn_rate)

        gan.graph.d_vars = d_vars
        g_defk = {k[2:]: v for k, v in config.items() if k[2:] in inspect.getargspec(config.g_trainer).args and k.startswith("g_")}
        d_defk = {k[2:]: v for k, v in config.items() if k[2:] in inspect.getargspec(config.d_trainer).args and k.startswith("d_")}
        g_optimizer = config.g_trainer(g_lr, **g_defk)
        d_optimizer = config.d_trainer(d_lr, **d_defk)
        if(config.clipped_gradients):
            g_optimizer = capped_optimizer(g_optimizer, config.clipped_gradients, g_loss, g_vars)
            d_optimizer = capped_optimizer(d_optimizer, config.clipped_gradients, d_loss, d_vars)
        else:
            g_optimizer = g_optimizer.minimize(g_loss, var_list=g_vars)
            d_optimizer = d_optimizer.minimize(d_loss, var_list=d_vars)

        gan.graph.clip = [tf.assign(d,tf.clip_by_value(d, -config.d_clipped_weights, config.d_clipped_weights))  for d in d_vars]

        return g_optimizer, d_optimizer
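When config.d_clipped_weights is set, gan.graph.clip implements WGAN-style weight clipping, and the training loop would run it after each discriminator update. A sketch (the session variable is assumed):

sess.run(gan.graph.clip)  # clamp each d_var to [-d_clipped_weights, +d_clipped_weights]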
Project: baselines | Author: openai
def setup_popart(self):
        # See https://arxiv.org/pdf/1602.07714.pdf for details.
        self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
        new_std = self.ret_rms.std
        self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
        new_mean = self.ret_rms.mean

        self.renormalize_Q_outputs_op = []
        for vs in [self.critic.output_vars, self.target_critic.output_vars]:
            assert len(vs) == 2
            M, b = vs
            assert 'kernel' in M.name
            assert 'bias' in b.name
            assert M.get_shape()[-1] == 1
            assert b.get_shape()[-1] == 1
            self.renormalize_Q_outputs_op += [M.assign(M * self.old_std / new_std)]
            self.renormalize_Q_outputs_op += [b.assign((b * self.old_std + self.old_mean - new_mean) / new_std)]
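A sketch of how these Pop-Art rescaling ops would be driven when the return statistics change (prev_std and prev_mean are assumed to hold the statistics from before the ret_rms update):

self.sess.run(self.renormalize_Q_outputs_op, feed_dict={
    self.old_std: np.array([prev_std]),
    self.old_mean: np.array([prev_mean]),
})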
Project: zhusuan | Author: thu-ml
def __init__(self, log_prior, log_joint, prior_sampler,
                 hmc, observed, latent, n_chains=25, n_temperatures=1000,
                 verbose=False):
        # Shape of latent: [chain_axis, num_data, data dims]
        # Construct the tempered objective
        self.n_chains = n_chains
        self.n_temperatures = n_temperatures
        self.verbose = verbose

        with tf.name_scope("AIS"):
            self.temperature = tf.placeholder(tf.float32, shape=[],
                                              name="temperature")

            def log_fn(observed):
                return log_prior(observed) * (1 - self.temperature) + \
                    log_joint(observed) * self.temperature

            self.log_fn = log_fn
            self.log_fn_val = log_fn(merge_dicts(observed, latent))
            self.sample_op, self.hmc_info = hmc.sample(
                log_fn, observed, latent)
            self.init_latent = [tf.assign(z, z_s)
                                for z, z_s in zip(latent.values(),
                                                  prior_sampler.values())]
Project: zhusuan | Author: thu-ml
def tune(self, acceptance_rate, fresh_start):
        def adapt_stepsize():
            new_step = tf.assign(self.step, (1 - fresh_start) * self.step + 1)
            rate1 = tf.div(1.0, new_step + self.t0)
            new_h_bar = tf.assign(
                self.h_bar, (1 - fresh_start) * (1 - rate1) * self.h_bar +
                rate1 * (self.delta - acceptance_rate))
            log_epsilon = self.mu - tf.sqrt(new_step) / self.gamma * new_h_bar
            rate = tf.pow(new_step, -self.kappa)
            new_log_epsilon_bar = tf.assign(
                self.log_epsilon_bar,
                rate * log_epsilon + (1 - fresh_start) * (1 - rate) *
                self.log_epsilon_bar)
            with tf.control_dependencies([new_log_epsilon_bar]):
                new_log_epsilon = tf.identity(log_epsilon)

            return tf.exp(new_log_epsilon)

        c = tf.cond(self.adapt_step_size,
                    adapt_stepsize,
                    lambda: tf.exp(self.log_epsilon_bar))

        return c
Project: zhusuan | Author: thu-ml
def update(self, x):
        # x: (chain_dims data_dims)
        new_t = tf.assign(self.t, self.t + 1)
        weight = (1 - self.decay) / (1 - tf.pow(self.decay, new_t))
        # incr: (chain_dims data_dims)
        incr = [weight * (q - mean) for q, mean in zip(x, self.mean)]
        # mean: (1,...,1 data_dims)
        update_mean = [mean.assign_add(
            tf.reduce_mean(i, axis=self.chain_axes, keep_dims=True))
            for mean, i in zip(self.mean, incr)]
        # var: (1,...,1 data_dims)
        new_var = [
            (1 - weight) * var +
            tf.reduce_mean(i * (q - mean), axis=self.chain_axes,
                           keep_dims=True)
            for var, i, q, mean in zip(self.var, incr, x, update_mean)]

        update_var = [tf.assign(var, n_var)
                      for var, n_var in zip(self.var, new_var)]
        return update_var
Project: icml17_knn | Author: taolei87
def batch_normalization(x, scope, decay=0.999, eps=1e-6, training=True):
    ndim = len(x.get_shape().as_list())
    fdim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        gamma = tf.get_variable("scale", [fdim], tf.float32, tf.constant_initializer(1.0))
        beta = tf.get_variable("offset", [fdim], tf.float32, tf.constant_initializer(0.0))
        mean = tf.get_variable("mean", [fdim], tf.float32, tf.constant_initializer(0.0), trainable=False)
        var = tf.get_variable("variance", [fdim], tf.float32, tf.constant_initializer(1.0), trainable=False)
        if training:
            x_mean, x_var = tf.nn.moments(x, range(ndim - 1))
            avg_mean = tf.assign(mean, mean * decay + x_mean * (1.0 - decay))
            avg_var = tf.assign(var, var * decay + x_var * (1.0 - decay))
            with tf.control_dependencies([avg_mean, avg_var]):
                return tf.nn.batch_normalization(x, x_mean, x_var, beta, gamma, eps)
        else:
            return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
Project: icml17_knn | Author: taolei87
def batch_normalization_with_mask(x, mask, scope, decay=0.999, eps=1e-6, training=True):
    ndim = len(x.get_shape().as_list())
    fdim = x.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        gamma = tf.get_variable("scale", [fdim], tf.float32, tf.constant_initializer(1.0))
        beta = tf.get_variable("offset", [fdim], tf.float32, tf.constant_initializer(0.0))
        mean = tf.get_variable("mean", [fdim], tf.float32, tf.constant_initializer(0.0), trainable=False)
        var = tf.get_variable("variance", [fdim], tf.float32, tf.constant_initializer(1.0), trainable=False)
        if training:
            x_mean, x_var = tf.nn.weighted_moments(x, range(ndim - 1), mask)
            avg_mean = tf.assign(mean, mean * decay + x_mean * (1.0 - decay))
            avg_var = tf.assign(var, var * decay + x_var * (1.0 - decay))
            with tf.control_dependencies([avg_mean, avg_var]):
                return tf.nn.batch_normalization(x, x_mean, x_var, beta, gamma, eps)
        else:
            return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
Project: ngraph | Author: NervanaSystems
def test_ref_assign(self):
        # Currently ngraph and tf have different assign semantics
        # eval(ng.assign(a, 1)) returns None, but eval(tf.assign(a, 1)) returns
        # a, which is 1.
        # TODO: fix this test after assign op / user_deps are fixed in ngraph
        # TODO: double assignments fails

        # tf placeholder
        a = tf.Variable(tf.constant(np.random.randn(2, 3), name="a"))
        b = tf.Variable(tf.constant(np.random.randn(2, 3), name="b"))
        init_op = tf.global_variables_initializer()
        a_update = tf.assign(a, b)

        # test
        tf_result = self.tf_run(a_update, tf_init_op=init_op)
        ng_result = self.ng_run(a)
        ng.testing.assert_allclose(tf_result, ng_result)
Project: ngraph | Author: NervanaSystems
def test_ref_assign_add(self):
        # Currently ngraph and tf have different assign semantics
        # eval(ng.assign(a, 1)) returns None, but eval(tf.assign(a, 1)) returns
        # a, which is 1.
        # TODO: fix this test after assign op / user_deps are fixed in ngraph
        # TODO: double assignments fails

        # tf placeholder
        a = tf.Variable(tf.constant(np.random.randn(2, 3), name="a"))
        b = tf.Variable(tf.constant(np.random.randn(2, 3), name="b"))
        init_op = tf.global_variables_initializer()
        a_update = tf.assign_add(a, b)

        # test
        tf_result = self.tf_run(a_update, tf_init_op=init_op)
        ng_result = self.ng_run(a)
        ng.testing.assert_allclose(tf_result, ng_result)
Project: tflearn | Author: tflearn
def _compute_global_pc(self, dataset, session, limit=None):
        """ Compute the Principal Component. """
        _dataset = dataset
        if isinstance(limit, int):
            _dataset = _dataset[:limit]
        d = _dataset
        s0, s1, s2, s3 = d.shape[0], d.shape[1], d.shape[2], d.shape[3]
        flat = np.reshape(d, (s0, s1 * s2 * s3))
        sigma = np.dot(flat.T, flat) / flat.shape[1]
        U, S, V = np.linalg.svd(sigma)
        pc = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + _EPSILON))), U.T)
        self.global_pc.assign(pc, session)
        return pc

    # -----------------------
    #  Persistent Parameters
    # -----------------------
Project: tflearn | Author: tflearn
def _compute_global_std(self, dataset, session, limit=None):
        """ Compute std of a dataset. A limit can be specified for faster
        computation, considering only the first 'limit' elements. """
        _dataset = dataset
        std = 0.
        if isinstance(limit, int):
            _dataset = _dataset[:limit]
        if isinstance(_dataset, np.ndarray) and not self.global_std_pc:
            std = np.std(_dataset)
        else:
            for i in range(len(dataset)):
                if not self.global_std_pc:
                    std += np.std(dataset[i]) / len(dataset)
                else:
                    std += (np.std(dataset[i], axis=(0, 1),
                             keepdims=True) / len(dataset))[0][0]
        self.global_std.assign(std, session)
        return std
Project: tflearn | Author: tflearn
def set_value(var, value, session=None):
    """ set_value.

    Set a variable's value. If no session provided, use default one.

    Arguments:
        var: `Variable`. The variable to assign a value.
        value: The value to assign. Must be compatible with variable dtype.
        session: `Session`. The session to perform the assignment.
            Default: the default session.

    """
    op = tf.assign(var, value=value)
    if not session:
        session = tf.get_default_session()
    return op.eval(session=session)
Project: tflearn | Author: tflearn
def init_training_mode():
    """  init_training_mode.

    Creates the `is_training` variable and its ops if they haven't been created
    yet. This op is required if you are using layers such as dropout or
    batch normalization independently of TFLearn models (DNN or Trainer class).

    """
    # 'is_training' collection stores the training mode variable
    coll = tf.get_collection('is_training')
    if len(coll) == 0:
        tr_var = variable(
            "is_training", dtype=tf.bool, shape=[],
            initializer=tf.constant_initializer(False),
            trainable=False)
        tf.add_to_collection('is_training', tr_var)
        # 'is_training_ops' stores the ops to update training mode variable
        a = tf.assign(tr_var, True)
        b = tf.assign(tr_var, False)
        tf.add_to_collection('is_training_ops', a)
        tf.add_to_collection('is_training_ops', b)
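A sketch of how the stored ops are meant to be toggled (the collection order follows the code above: the True-assign is added first):

train_on, train_off = tf.get_collection('is_training_ops')
sess.run(train_on)   # before a training step, e.g. to enable dropout
sess.run(train_off)  # before evaluation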
Project: automatic-portrait-tf | Author: Corea
def set_default_value(self, sess, caffe_mat, layer_id_map):
        for layer_name, idxs in layer_id_map.items():
            idx, bias_term = idxs

            weight = caffe_mat[idx][1][0].transpose((2, 3, 1, 0))
            if bias_term:
                bias = caffe_mat[idx][1][1]

            if layer_name.startswith('upscore'):
                weight = weight[:, :, :self.output_dim, :self.output_dim]
                bias = bias[:self.output_dim]

            if layer_name.startswith('score'):
                weight = weight[:, :, :, :self.output_dim]
                bias = bias[:self.output_dim]

            name = layer_name + '_weight'
            sess.run(tf.assign(self.net[name], weight))

            if bias_term:
                name = layer_name + '_bias'
                sess.run(tf.assign(self.net[name], bias))
Project: conv_seq2seq | Author: tobyyouup
def accumulate_strings(values, name="strings"):
  """Accumulates strings into a vector.

  Args:
    values: A 1-d string tensor that contains values to add to the accumulator.

  Returns:
    A tuple (value_tensor, update_op).
  """
  tf.assert_type(values, tf.string)
  strings = tf.Variable(
      name=name,
      initial_value=[],
      dtype=tf.string,
      trainable=False,
      collections=[],
      validate_shape=True)
  value_tensor = tf.identity(strings)
  update_op = tf.assign(
      ref=strings, value=tf.concat([strings, values], 0), validate_shape=False)
  return value_tensor, update_op
Project: YellowFin | Author: JianGoForIt
def update_hyper_param(self):
    assign_hyper_ops = []
    self._mu = tf.identity(tf.cond(
      self._do_tune, lambda: self.get_mu_tensor(),
      lambda: self._mu_var))
    with tf.control_dependencies([self._mu]):
      self._lr = tf.identity(tf.cond(
        self._do_tune, lambda: self.get_lr_tensor(),
        lambda: self._lr_var))

    with tf.control_dependencies([self._mu, self._lr]):
      if self._use_unsmoothed_lr_mu:
        assign_hyper_ops.append(tf.assign(self._mu_var, self._mu) )
        assign_hyper_ops.append(tf.assign(self._lr_var, self._lr) )
      else:
        self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
        self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
        with tf.control_dependencies([self._mu, self._lr] ):
          assign_hyper_ops.append(tf.assign(self._mu_var, self._mu) )
          assign_hyper_ops.append(tf.assign(self._lr_var, self._lr) )
    assign_hyper_op = tf.group(*assign_hyper_ops)
    return assign_hyper_op
Project: rllabplusplus | Author: shaneshixiang
def set_param_values(self, flattened_params, sess=None, **tags):
        debug = tags.pop("debug", False)
        param_values = unflatten_tensors(
            flattened_params, self.get_param_shapes(**tags))
        ops = []
        feed_dict = dict()
        for param, dtype, value in zip(
                self.get_params(**tags),
                self.get_param_dtypes(**tags),
                param_values):
            if param not in self._cached_assign_ops:
                assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype, shape=param.get_shape())
                assign_op = tf.assign(param, assign_placeholder)
                self._cached_assign_ops[param] = assign_op
                self._cached_assign_placeholders[param] = assign_placeholder
            ops.append(self._cached_assign_ops[param])
            feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
            if debug:
                print("setting value of %s" % param.name)
        tf.get_default_session().run(ops, feed_dict=feed_dict)
Project: rllabplusplus | Author: shaneshixiang
def predict_sym(self, xs):
        return L.get_output(self.l_out, xs)

    # def fit(self, xs, ys):
    #     if self._normalize_inputs:
    #         # recompute normalizing constants for inputs
    #         new_mean = np.mean(xs, axis=0, keepdims=True)
    #         new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
    #         tf.get_default_session().run(tf.group(
    #             tf.assign(self._x_mean_var, new_mean),
    #             tf.assign(self._x_std_var, new_std),
    #         ))
    #         inputs = [xs, ys]
    #     loss_before = self._optimizer.loss(inputs)
    #     if self._name:
    #         prefix = self._name + "_"
    #     else:
    #         prefix = ""
    #     logger.record_tabular(prefix + 'LossBefore', loss_before)
    #     self._optimizer.optimize(inputs)
    #     loss_after = self._optimizer.loss(inputs)
    #     logger.record_tabular(prefix + 'LossAfter', loss_after)
    #     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
Project: tf.rasterizer | Author: vahidk
def clear_fn(self):
        color = tf.placeholder(tf.float32, [3], name="ph_color")
        depth = tf.placeholder(tf.float32, [], name="ph_depth")
        packed_color = utils.pack_colors(color, 0)
        tiled_color = tf.fill([self.height, self.width], packed_color)
        tiled_depth = tf.fill([self.height, self.width], depth)
        assign_color = tf.assign(self.color, tiled_color)
        assign_depth = tf.assign(self.depth, tiled_depth)
        self.commands.append(assign_color)
        self.commands.append(assign_depth)

        def _clear(color_val=[0., 0., 0.], depth_val=FLT_MIN):
            self.args[color] = color_val
            self.args[depth] = depth_val

        return _clear
Project: relaax | Author: deeplearninc
def __init__(self, num_units, num_cores, forget_bias=1.0, timestep=0):
        """Initialize the basic LSTM cell.
        Args:
          num_units: int, The number of units in the LSTM cell.
          num_cores: int, The number of partitions (cores) in the LSTM state.
          forget_bias: float, The bias added to forget gates (see above).
        """
        self._num_units = num_units
        self._forget_bias = forget_bias
        # additional variables
        self._cores = tf.constant(num_cores)
        self._timestep = tf.Variable(timestep)  # reset to 0 on terminal state (or epoch)
        self.reset_timestep = tf.assign(self._timestep, 0)
        # auxiliary operators
        dilated_mask, hold_mask = self._get_mask(num_cores)
        self._dilated_mask = tf.constant(dilated_mask, dtype=tf.float32)
        self._hold_mask = tf.constant(hold_mask, dtype=tf.float32)
Project: MachineLearningTutorial | Author: SpikeKing
def gnr_graph_checkpoint():
    """
    ???????checkpoint
    :return: 
    """
    I = tf.placeholder(tf.float32, shape=[None, 3], name='I')  # input
    W = tf.Variable(tf.zeros(shape=[3, 2]), dtype=tf.float32, name='W')  # weights
    b = tf.Variable(tf.zeros(shape=[2]), dtype=tf.float32, name='b')  # biases
    O = tf.nn.relu(tf.matmul(I, W) + b, name='O')  # activation / output

    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)

        tf.train.write_graph(sess.graph_def, MODEL_FOLDER, 'tfdroid.pbtxt')  # write the TensorFlow graph definition

        # assign values to the variables
        sess.run(tf.assign(W, [[1, 2], [4, 5], [7, 8]]))
        sess.run(tf.assign(b, [1, 1]))

        # save the variables to a checkpoint
        saver.save(sess, MODEL_FOLDER + 'tfdroid.ckpt')
Project: A3C | Author: go2sea
def pull_params(self):
        pull_actor_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.actor_params, self.actor_params)]
        pull_critic_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.critic_params, self.critic_params)]
        return [pull_actor_params, pull_critic_params]
Project: A3C | Author: go2sea
def push_params(self):  # note: push does not use assign; it applies the local gradients to the globalAC parameters
        push_actor_params = self.optimizer_actor.apply_gradients(zip(self.actor_grads, self.globalAC.actor_params))
        push_critic_params = self.optimizer_critic.apply_gradients(zip(self.critic_grads, self.globalAC.critic_params))
        return [push_actor_params, push_critic_params]
Project: distributional_perspective_on_RL | Author: Kiwoo
def set_value(v, val):
    get_session().run(v.assign(val))
Project: distributional_perspective_on_RL | Author: Kiwoo
def __init__(self, var_list, dtype=tf.float32):
        assigns = []
        shapes = list(map(var_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])

        self.theta = theta = tf.placeholder(dtype,[total_size])
        start=0
        assigns = []
        for (shape,v) in zip(shapes,var_list):
            size = intprod(shape)
            assigns.append(tf.assign(v, tf.reshape(theta[start:start+size],shape)))
            start+=size
        self.op = tf.group(*assigns)
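This looks like the constructor of a SetFromFlat-style helper; a sketch of using it, assuming that class name and that flat_params is a 1-D numpy array of length total_size:

setter = SetFromFlat(var_list)
sess.run(setter.op, feed_dict={setter.theta: flat_params})  # writes every variable at once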
Project: seq2seq | Author: google
def test_sampling(self):
    hook = hooks.TrainSampleHook(
        params={"every_n_steps": 10}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())

    global_step = tf.contrib.framework.get_or_create_global_step()
    no_op = tf.no_op()
    hook.begin()
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      sess.run(tf.tables_initializer())

      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(no_op)

      outfile = os.path.join(self.sample_dir, "samples_000000.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 0",
                      readfile.read().decode("utf-8"))

      # Should not trigger for step 9
      sess.run(tf.assign(global_step, 9))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000009.txt")
      self.assertFalse(os.path.exists(outfile))

      # Should trigger for step 10
      sess.run(tf.assign(global_step, 10))
      mon_sess.run(no_op)
      outfile = os.path.join(self.sample_dir, "samples_000010.txt")
      with open(outfile, "rb") as readfile:
        self.assertIn("Prediction followed by Target @ Step 10",
                      readfile.read().decode("utf-8"))
Project: seq2seq | Author: google
def test_capture(self):
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Some test computation
    some_weights = tf.get_variable("weights", [2, 128])
    computation = tf.nn.softmax(some_weights)

    hook = hooks.MetadataCaptureHook(
        params={"step": 5}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should not trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      # Should trigger *after* step 5
      sess.run(tf.assign(global_step, 5))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      mon_sess.run(computation)
      self.assertEqual(
          set(gfile.ListDirectory(self.model_dir)),
          set(["run_meta", "tfprof_log", "timeline.json"]))
Project: benchmarks | Author: tensorflow
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
                             inc_loss_scale_every_n):
  """Returns the update op for loss scaling variables.

  We maintain the counter `loss_scale_normal_steps` to count the number of steps
  we have been using the current `loss_scale`. In most cases, this function
  increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
  greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
  and reset `loss_scale_normal_steps` to zero.

  This op is only called if the gradients don't have any infs or nans. If infs
  or nans do occur in the gradients, we immediately halve `loss_scale` and
  reset `loss_scale_normal_steps` to zero.

  Args:
    loss_scale: a tf.Variable representing the loss_scale value.
    loss_scale_normal_steps: a tf.Variable representing the number of training
      steps that have run since the loss_scale last changed.
    inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
      increased every `inc_loss_scale_every_n` steps, unless the gradients have
      infs or nans.

  Returns:
    An op for updating `loss_scale` and `loss_scale_normal_steps`.
  """

  def increment_loss_scale_normal_steps_func():
    return tf.group(loss_scale_normal_steps.assign_add(1))

  def increase_loss_scale_func():
    return tf.group(
        tf.assign(loss_scale_normal_steps, 0),
        tf.assign(loss_scale, loss_scale * 2))

  # true_fn and false_fn must have the same type.
  return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
                 increment_loss_scale_normal_steps_func,
                 increase_loss_scale_func)
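The docstring mentions the complementary path taken when infs or nans appear; a sketch of what that halving op could look like under the same variable names (an assumption, not code from the benchmark):

dec_op = tf.group(
    tf.assign(loss_scale_normal_steps, 0),
    tf.assign(loss_scale, loss_scale / 2))  # halve the scale, restart the counter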
Project: HandDetection | Author: YunqiuXu
def fix_variables(self, sess, pretrained_model):
    print('Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
      with tf.device("/cpu:0"):
        # fix RGB to BGR
        conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
        restorer_fc = tf.train.Saver({self._scope + "/conv1/weights": conv1_rgb})
        restorer_fc.restore(sess, pretrained_model)

        sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/weights:0'], 
                           tf.reverse(conv1_rgb, [2])))
Project: HandDetection | Author: YunqiuXu
def fix_variables(self, sess, pretrained_model):
    print('Fix MobileNet V1 layers..')
    with tf.variable_scope('Fix_MobileNet_V1') as scope:
      with tf.device("/cpu:0"):
        # fix RGB to BGR, and match the scale by (255.0 / 2.0)
        Conv2d_0_rgb = tf.get_variable("Conv2d_0_rgb", 
                                    [3, 3, 3, max(int(32 * self._depth_multiplier), 8)], 
                                    trainable=False)
        restorer_fc = tf.train.Saver({self._scope + "/Conv2d_0/weights": Conv2d_0_rgb})
        restorer_fc.restore(sess, pretrained_model)

        sess.run(tf.assign(self._variables_to_fix[self._scope + "/Conv2d_0/weights:0"], 
                           tf.reverse(Conv2d_0_rgb / (255.0 / 2.0), [2])))