Python tensorflow module: initialize_variables() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.initialize_variables().
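
Note: tf.initialize_variables() is the legacy name for this op; around TensorFlow 0.12 it was deprecated in favor of tf.variables_initializer(), which takes the same var_list argument. A minimal sketch of the two spellings (the variable name is illustrative):

import tensorflow as tf

v = tf.Variable(tf.zeros([2, 2]), name='v')

# Legacy spelling (deprecated):
#   init_op = tf.initialize_variables([v])
# Current spelling:
init_op = tf.variables_initializer([v])

with tf.Session() as sess:
    sess.run(init_op)   # run the initializer once before reading `v`
    print(sess.run(v))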

Project: deep-q-learning    Author: alvinwan    | project source | file source
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # If using an older version of TensorFlow, uncomment the line
                # below and comment out the line after it.
                # session.run(tf.initialize_variables([v]), feed_dict)
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more likely if there's
            # another variable outside of the list that still needs to be initialized. This could be
            # detected here, but life's finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
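
A hedged usage sketch (the variables `a` and `b` are hypothetical): `b`'s initial value reads `a`, so initializing both in one pass can raise FailedPreconditionError, which is exactly the case the helper above retries around.

a = tf.Variable(tf.ones([3]), name='a')
b = tf.Variable(2.0 * a, name='b')  # b's initial value reads `a` directly

sess = tf.Session()
# Listing `b` first exercises the retry: `b` fails on the first pass,
# `a` succeeds, and `b` then succeeds on the second pass.
initialize_interdependent_variables(sess, [b, a], feed_dict={})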
Project: keras    Author: GeekLiB    | project source | file source
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
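
The hasattr dispatch above is how libraries straddled the 0.12 API rename; an equivalent shim, as a sketch assuming at least one of the two names exists on the installed version:

variables_initializer = getattr(tf, 'variables_initializer',
                                getattr(tf, 'initialize_variables', None))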
Project: rl_algorithms    Author: DanielTakeshi    | project source | file source
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                # If using an older version of TensorFlow, uncomment the line
                # below and comment out the line after it.
                # session.run(tf.initialize_variables([v]), feed_dict)
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more likely if there's
            # another variable outside of the list that still needs to be initialized. This could be
            # detected here, but life's finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
Project: bone-age    Author: radinformatics    | project source | file source
def __init__(self, checkpoint_path):
        layers = 50
        num_blocks = [3, 4, 6, 3]
        self.inference = lambda images, is_train : inference(images, 
                                                   is_training=is_train, 
                                                   num_classes=NUM_AGES*2,
                                                   num_blocks=num_blocks, 
                                                   bottleneck=True)

        self.x = tf.placeholder(tf.uint8, shape=(256,256,3), name='input_image')
        self.crops = fixed_crops(self.x)
        self.logits = self.inference(self.crops, is_train=False)
        self.pred = tf.nn.softmax(self.logits, name='prediction')

        # Restore saved weights
        restore_variables = tf.trainable_variables() \
                + tf.moving_average_variables()
        self.saver = tf.train.Saver(restore_variables)
        self.sess = tf.Session()
        self.saver.restore(self.sess, checkpoint_path)

        #self.sess.run(tf.initialize_variables([var for var \
        #        in tf.all_variables() if var not in restore_variables]))
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def evaluate(self, t_data, t_label, s):
        state = self.fit_next(t_data, s, train=False)
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('evaluate'):
            return self.output_layer.evaluate(tf.transpose(state[0]), label)


        # decay_fn = tf.train.exponential_decay
        # All this stuff still needs updating???
        # loss = tf.argmax(self.ht, 1)
        # learning_rate_decay_fn=decay_fn
        # optimization = tf.contrib.layers.optimize_loss(self.ht, global_step=tf.Variable([1, 1]), optimizer=optimizer,
        #                                                learning_rate=0.01,
        #                                                variables=[self.weight_forget, self.weight_input, self.weight_output,
        #                                                           self.weight_C, self.biases_forget, self.biases_input,
        #                                                           self.biases_C, self.biases_output])
        # opt_op = optimizer.minimize(loss, var_list=[self.weight_forget, self.weight_input, self.weight_output,
        # self.weight_C, self.biases_forget, self.biases_input, self.biases_C,
        # self.biases_output])

##########################################################################
##########################################################################
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_average_precision_at_k(
      self, predictions, labels, k, expected, weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      predictions = tf.constant(predictions, tf.float32)
      metric, update = metrics.streaming_sparse_average_precision_at_k(
          predictions=predictions, labels=labels, k=k, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      local_variables = tf.local_variables()
      tf.initialize_variables(local_variables).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertAlmostEqual(expected, update.eval())
        self.assertAlmostEqual(expected, metric.eval())
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_precision_at_top_k(self,
                                                top_k_predictions,
                                                labels,
                                                expected,
                                                class_id=None,
                                                weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_top_k(
          top_k_predictions=tf.constant(top_k_predictions, tf.int32),
          labels=labels, class_id=class_id, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_average_precision_at_k(
      self, predictions, labels, k, expected, weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      predictions = tf.constant(predictions, tf.float32)
      metric, update = metrics.streaming_sparse_average_precision_at_k(
          predictions, labels, k, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      local_variables = tf.local_variables()
      tf.initialize_variables(local_variables).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        _assert_nan(self, update.eval())
        _assert_nan(self, metric.eval())
      else:
        self.assertAlmostEqual(expected, update.eval())
        self.assertAlmostEqual(expected, metric.eval())
Project: keras-customized    Author: ambrite    | project source | file source
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
Project: tensorprob    Author: tensorprob    | project source | file source
def test_scipy_lbfgsb():
    sess = tf.Session()
    x = tf.Variable(np.float64(2), name='x')
    sess.run(tf.initialize_variables([x]))
    optimizer = ScipyLBFGSBOptimizer(verbose=True, session=sess)
    # With gradient
    results = optimizer.minimize([x], x**2, [2 * x])
    assert results.success
    # Without gradient
    results = optimizer.minimize([x], x**2)
    assert results.success
    # Test callback
    def callback(xs):
        pass
    optimizer = ScipyLBFGSBOptimizer(verbose=True, session=sess, callback=callback)
    assert optimizer.minimize([x], x**2).success
    @raises(ValueError)
    def test_illegal_parameter_as_variable1():
        optimizer.minimize([42], x**2)
    test_illegal_parameter_as_variable1()
    @raises(ValueError)
    def test_illegal_parameter_as_variable2():
        optimizer.minimize(42, x**2)
    test_illegal_parameter_as_variable2()
Project: tensorprob    Author: tensorprob    | project source | file source
def test_migrad():
    sess = tf.Session()
    x = tf.Variable(np.float64(2), name='x')
    sess.run(tf.initialize_variables([x]))
    optimizer = MigradOptimizer(session=sess)
    # With gradient
    results = optimizer.minimize([x], x**2, [2 * x])
    assert results.success
    # Without gradient
    results = optimizer.minimize([x], x**2)
    assert results.success
    @raises(ValueError)
    def test_illegal_parameter_as_variable1():
        optimizer.minimize([42], x**2)
    test_illegal_parameter_as_variable1()
    @raises(ValueError)
    def test_illegal_parameter_as_variable2():
        optimizer.minimize(42, x**2)
    test_illegal_parameter_as_variable2()
Project: thinstack-rl    Author: hans    | project source | file source
def _create_state(self):
        """Prepare stateful variables modified during the recurrence."""

        # Both the queue and the stack are flattened stack_size * batch_size
        # tensors. `stack_size` many blocks of `batch_size` values
        stack_shape = (self.stack_size * self.batch_size, self.model_dim)
        self.stack = tf.Variable(tf.zeros(stack_shape, dtype=tf.float32),
                                 trainable=False, name="stack")
        self.queue = tf.Variable(tf.zeros((self.stack_size * self.batch_size,), dtype=tf.float32),
                                 trainable=False, name="queue")

        self.buff_cursors = tf.Variable(tf.zeros((self.batch_size,), dtype=tf.float32),
                                          trainable=False, name="buff_cursors")
        self.cursors = tf.Variable(tf.ones((self.batch_size,), dtype=tf.float32) * - 1,
                                   trainable=False, name="cursors")

        # TODO make parameterizable
        self.tracking_value = tf.Variable(tf.zeros((self.batch_size, self.tracking_dim), dtype=tf.float32),
                                          trainable=False, name="tracking_value")

        # Create an Op which will (re-)initialize the auxiliary variables
        # declared above.
        self._aux_vars = [self.stack, self.queue, self.buff_cursors, self.cursors,
                          self.tracking_value]
        self.variable_initializer = tf.initialize_variables(self._aux_vars)
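
A hedged usage note: because the initializer op is built once and stored, callers can cheaply reset the auxiliary state between batches without touching trainable weights. Assuming `stack` is an instance of the class above and `sess` is an open session:

sess.run(stack.variable_initializer)  # zero stack/queue/cursors for the next batch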
Project: keras    Author: NVIDIA    | project source | file source
def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))
Project: AuthoringDecompositions    Author: jrock08    | project source | file source
def __init__(self, codes_shape, scipy_gmm):
        self.graph = tf.Graph()
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=sess_config)

        print(self.graph)
        n,c = codes_shape

        with self.graph.as_default():
            self.pl = PlaceholderManager()
            self.pl.add_placeholder('codes', tf.float32, codes_shape)
            self.pl.add_placeholder('phase_train', tf.bool, [])

            with tf.variable_scope('GMM'):
                self.gmm = models.GMM(self.pl['codes'], scipy_gmm, self.pl['phase_train'])

        print(var_collect.collect_all(self.graph))
        #tf.initialize_variables(
        #    var_list=var_collect.collect_all(self.graph)).run(session=self.sess)
Project: tfplus    Author: renmengye    | project source | file source
def init(self, sess):
        if not self.has_built_all:
            raise Exception(
                'Need to call build_all or build_eval before init')
        self._has_init = True
        my_var_list = self.get_all_vars()
        sess.run(tf.initialize_variables(my_var_list))
        return self
Project: fold    Author: tensorflow    | project source | file source
def test_all_initialized(self):
    with self.test_session() as sess:
      x = tf.Variable(tf.zeros([]))
      sess.run(tf.initialize_variables([x]))
      self.assertEqual([], tdc._init_uninitialized(sess))
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def __init__(self, name):
    with tf.variable_scope('inv') as scope:
      self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
      self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)

      self.n_hidden = 1200

      W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
      b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)

      W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
      b_inv2 = bias_variable([X_L], name="b_inv2_"+name)

      self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]

      reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
      blah = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
      epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
      self.pred = tf.nn.softmax(tf.matmul(blah, W_inv2) + b_inv2) + epsilon1
      self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))

      optimizer = tf.train.RMSPropOptimizer(0.001)

      inv_gvs = optimizer.compute_gradients(self.cost)
      self.train_inv = optimizer.apply_gradients(inv_gvs)

      all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
      self.init = tf.initialize_variables(all_var_var)
      self.saver = tf.train.Saver(self.VARS)

  # train on a particular data batch
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def __init__(self, name):
    with tf.variable_scope('inv') as scope:
      self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
      self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)

      self.n_hidden = 1200

      W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
      b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)

      W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
      b_inv2 = bias_variable([X_L], name="b_inv2_"+name)

      self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]

      reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
      blah = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
      epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
      self.pred = tf.nn.softmax(tf.matmul(blah, W_inv2) + b_inv2) + epsilon1
      self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))

      optimizer = tf.train.RMSPropOptimizer(0.001)

      inv_gvs = optimizer.compute_gradients(self.cost)
      self.train_inv = optimizer.apply_gradients(inv_gvs)

      all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
      self.init = tf.initialize_variables(all_var_var)
      self.saver = tf.train.Saver(self.VARS)

  # train on a particular data batch
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def __init__(self, name):
    with tf.variable_scope('inv') as scope:
      self.true_label = tf.placeholder(tf.float32, [N_BATCH, X_L], name="true_label_"+name)
      self.observations = tf.placeholder(tf.float32, [N_BATCH, L, L, 2], name="obs_"+name)

      self.n_hidden = 1200

      W_inv1 = weight_variable([L*L*2, self.n_hidden], name="W_inv1_"+name)
      b_inv1 = bias_variable([self.n_hidden], name="b_inv1_"+name)

      W_inv2 = weight_variable([self.n_hidden,X_L], name="W_inv2_"+name)
      b_inv2 = bias_variable([X_L], name="b_inv2_"+name)

      self.VARS = [W_inv1, b_inv1, W_inv2, b_inv2]

      reshape_ob = tf.reshape(self.observations, [N_BATCH, L*L*2])
      blah = tf.nn.relu(tf.matmul(reshape_ob, W_inv1) + b_inv1)
      epsilon1 = tf.constant(1e-10, shape=[N_BATCH, X_L])
      self.pred = tf.nn.softmax(tf.matmul(blah, W_inv2) + b_inv2) + epsilon1
      self.cost = -tf.reduce_sum(self.true_label * tf.log(self.pred))

      optimizer = tf.train.RMSPropOptimizer(0.001)

      inv_gvs = optimizer.compute_gradients(self.cost)
      self.train_inv = optimizer.apply_gradients(inv_gvs)

      all_var_var = tf.get_collection(tf.GraphKeys.VARIABLES, scope='inv')
      self.init = tf.initialize_variables(all_var_var)
      self.saver = tf.train.Saver(self.VARS)

  # train on a particular data batch
Project: third_person_im    Author: bstadie    | project source | file source
def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d["params"])
Project: CDBN-for-Tensorflow    Author: shygiants    | project source | file source
def build_init_ops(self):
        if self.model_exists:
            self.init_ops = tf.initialize_local_variables()
        else:
            self.init_ops = [tf.initialize_local_variables(), tf.initialize_variables(self.variables)]
        self.saver = tf.train.Saver(self.variables)
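
A usage sketch, assuming `model` is an instance of this class and `checkpoint_path` points at the saved weights: run the init ops first, then restore when a checkpoint exists.

sess = tf.Session()
sess.run(model.init_ops)  # a single op or a list of ops; Session.run accepts both
if model.model_exists:
    model.saver.restore(sess, checkpoint_path)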
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)
                #self.Cta = tf.Variable(tf.ones([self.shape[0] + input_length, self.shape[0]]), trainable=False, name="Cat_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost)
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)
                #self.Cta = tf.Variable(tf.ones([self.shape[0] + input_length, self.shape[0]]), trainable=False, name="Cat_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def fit_next(self, data, s, last_state=True, train=True):  # set choose optimizer
        with tf.name_scope('optimizer'):
            input_data_T = tf.transpose([data], name="input_data_T")

            if not self.ht:
                # Init h_t
                self.ht = tf.Variable(tf.random_normal([self.shape[0], 1]), trainable=False, name="ht_%d" % self.node_id)
                # Init C_t
                self.Ct = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Ct_%d" % self.node_id)

                # Init layers variables
                self.ft = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="ft_%d" % self.node_id)
                self.it = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="it_%d" % self.node_id)
                self.Cta = tf.Variable(tf.ones([self.shape[0], 1]), trainable=False, name="Cta_%d" % self.node_id)

                s.run(tf.initialize_variables([self.ht, self.Ct, self.ft, self.it, self.Cta]))

            with tf.name_scope('train_layer'):
                self.train_layer(input_data_T, s)
                if train:
                    self.state.append((self.ht, self.Ct)) # store the state of each step
                    ret = self.state[-1] if last_state else self.state
                else:
                    ret = (self.ht, self.Ct)
                    self.restore_state()
        return ret
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def minimize(self, data, t_label, s, optimizer):
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('cost_function'):
            cost = self.output_layer.compute_loss(tf.transpose(self.ht), label)
        with tf.name_scope('minimization'):
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            return optimizer.minimize(cost, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | project source | file source
def evaluate(self, t_data, t_label, s):
        state = self.fit_next(t_data, s, train=False)
        label = tf.Variable(t_label, name="label", trainable=False, dtype=tf.float32)
        s.run(tf.initialize_variables([label]))
        with tf.name_scope('evaluate'):
            return self.output_layer.evaluate(tf.transpose(state[0]), label)
Project: failures_of_DL    Author: shakedshammah    | project source | file source
def Affine(name_scope,input_tensor,out_channels, relu=True, init_sess=None):
    input_shape = input_tensor.get_shape().as_list()
    input_channels = input_shape[-1]
    with tf.name_scope(name_scope):
        weights = tf.Variable(
            tf.truncated_normal([input_channels, out_channels],
                                stddev=1.0 / math.sqrt(float(input_channels))),name='weights')
        biases = tf.Variable(tf.zeros([out_channels]),name='biases')
        if init_sess is not None: init_sess.run(tf.initialize_variables([weights,biases]))
        if relu: return tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
        else: return tf.matmul(input_tensor, weights) + biases
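
A minimal sketch of chaining this helper into a two-layer network (shapes and scope names are illustrative); passing init_sess initializes each layer's weights as the graph is built:

import math
import tensorflow as tf

sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 784])
hidden = Affine('hidden', x, 128, relu=True, init_sess=sess)
logits = Affine('output', hidden, 10, relu=False, init_sess=sess)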
Project: failures_of_DL    Author: shakedshammah    | project source | file source
def Affine(name_scope,input_tensor,out_channels, relu=True):
    input_shape = input_tensor.get_shape().as_list()
    input_channels = input_shape[-1]
    with tf.name_scope(name_scope):
        weights = tf.Variable(
            tf.truncated_normal([input_channels, out_channels],
                                stddev=1.0 / math.sqrt(float(input_channels))),name='weights')
        biases = tf.Variable(tf.zeros([out_channels]),name='biases')
#         initializer = tf.initialize_variables([weights,biases])
        if relu: return tf.nn.relu(tf.matmul(input_tensor, weights) + biases)#,initializer
        else: return tf.matmul(input_tensor, weights) + biases#,initializer
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_precision_at_k(self,
                                            predictions,
                                            labels,
                                            k,
                                            expected,
                                            class_id=None,
                                            ignore_mask=None,
                                            weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if ignore_mask is not None:
        ignore_mask = tf.constant(ignore_mask, tf.bool)
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32), labels=labels,
          k=k, class_id=class_id, ignore_mask=ignore_mask, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
Project: lsdc    Author: febert    | project source | file source
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
    expected_precision = 0.5
    with self.test_session():
      _, precision = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_precision, precision.eval())
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_recall_at_k(self,
                                         predictions,
                                         labels,
                                         k,
                                         expected,
                                         class_id=None,
                                         ignore_mask=None,
                                         weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if ignore_mask is not None:
        ignore_mask = tf.constant(ignore_mask, tf.bool)
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=labels, k=k, class_id=class_id, ignore_mask=ignore_mask,
          weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        self.assertTrue(math.isnan(update.eval()))
        self.assertTrue(math.isnan(metric.eval()))
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
Project: lsdc    Author: febert    | project source | file source
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
    expected_recall = 0.5
    with self.test_session():
      _, recall = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_recall, recall.eval())
Project: lsdc    Author: febert    | project source | file source
def _test_streaming_sparse_precision_at_k(self,
                                            predictions,
                                            labels,
                                            k,
                                            expected,
                                            class_id=None,
                                            weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
      if weights is not None:
        weights = tf.constant(weights, tf.float32)
      metric, update = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32), labels=labels,
          k=k, class_id=class_id, weights=weights)

      # Fails without initialized vars.
      self.assertRaises(tf.OpError, metric.eval)
      self.assertRaises(tf.OpError, update.eval)
      tf.initialize_variables(tf.local_variables()).run()

      # Run per-step op and assert expected values.
      if math.isnan(expected):
        _assert_nan(self, update.eval())
        _assert_nan(self, metric.eval())
      else:
        self.assertEqual(expected, update.eval())
        self.assertEqual(expected, metric.eval())
Project: lsdc    Author: febert    | project source | file source
def test_top_k_rank_invalid(self):
    with self.test_session():
      # top_k_predictions has rank < 2.
      top_k_predictions = [9, 4, 6, 2, 0]
      sp_labels = tf.SparseTensorValue(
          indices=np.array([[0,], [1,], [2,]], np.int64),
          values=np.array([2, 7, 8], np.int64),
          shape=np.array([10,], np.int64))

      with self.assertRaises(ValueError):
        precision, _ = metrics.streaming_sparse_precision_at_top_k(
            top_k_predictions=tf.constant(top_k_predictions, tf.int64),
            labels=sp_labels)
        tf.initialize_variables(tf.local_variables()).run()
        precision.eval()
Project: lsdc    Author: febert    | project source | file source
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
    expected_precision = 0.5
    with self.test_session():
      _, precision = metrics.streaming_sparse_precision_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_precision, precision.eval())
Project: lsdc    Author: febert    | project source | file source
def test_sparse_tensor_value(self):
    predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
    labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
    expected_recall = 0.5
    with self.test_session():
      _, recall = metrics.streaming_sparse_recall_at_k(
          predictions=tf.constant(predictions, tf.float32),
          labels=_binary_2d_label_to_sparse_value(labels), k=1)

      tf.initialize_variables(tf.local_variables()).run()

      self.assertEqual(expected_recall, recall.eval())
Project: lsdc    Author: febert    | project source | file source
def initialize_op(self):
    """Returns an op for initializing tensorflow variables."""
    all_vars = self._row_factors + self._col_factors
    all_vars.extend([self._row_gramian, self._col_gramian])
    if self._row_weights is not None:
      assert self._col_weights is not None
      all_vars.extend(self._row_weights + self._col_weights)
    return tf.initialize_variables(all_vars)
Project: tensorlight    Author: bsautermeister    | project source | file source
def initialize_uninitialized_variables(session, var_list=None):
    """Initializes all uninitialized variables.
    Parameters
    ----------
    session: tf.Session
        The TensorFlow session to scan for uninitialized variables
    var_list: list(tf.Variable) or None
        The list of variables to filter for uninitialized ones.
        If None, tf.all_variables() is used.
    """
    uninit_vars = uninitialized_variables(session, var_list)
    session.run(tf.initialize_variables(uninit_vars))
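
The uninitialized_variables() helper this function calls is not shown on this page; a plausible sketch using the same run-and-probe pattern seen in other examples here (an assumption about its behavior, not the project's actual code):

def uninitialized_variables(session, var_list=None):
    """Return the variables in `var_list` that the session cannot yet read."""
    if var_list is None:
        var_list = tf.all_variables()
    uninit = []
    for var in var_list:
        try:
            session.run(var)  # reading an uninitialized variable raises
        except tf.errors.FailedPreconditionError:
            uninit.append(var)
    return uninit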
Project: rbm_based_autoencoders_with_tensorflow    Author: ikhlestov    | project source | file source
def _train_layer_pair(self):
        self.build_model()
        prev_run_no = self.params.get('run_no', None)
        self.define_runner_folders()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            self.sess = sess

            if prev_run_no:
                print("Restore variables from previous run:")
                restore_vars_dict = self._get_restored_variables_names()
                for var_name in restore_vars_dict.keys():
                    print("\t%s" % var_name)
                restorer = tf.train.Saver(restore_vars_dict)
                restorer.restore(sess, self.saves_path)
                print("Initialize not restored variables:")
                new_variables = self._get_new_variables_names()
                for var in new_variables:
                    print("\t%s" % var.name)
                sess.run(tf.initialize_variables(new_variables))

            else:
                print("Initialize new variables")
                tf.initialize_all_variables().run()
            self.summary_writer = tf.train.SummaryWriter(
                self.logs_dir, sess.graph)
            for epoch in range(self.params['epochs']):
                start = time.time()
                self._epoch_train_step()
                time_cons = time.time() - start
                time_cons = str(datetime.timedelta(seconds=time_cons))
                print("Epoch: %d, time consumption: %s" % (epoch, time_cons))

            # Save all trained variables
            saver = tf.train.Saver()
            saver.save(sess, self.saves_path)
Project: DDPG    Author: MOCR    | project source | file source
def __init__(self, x, size, selectTrain, sess, toTarget=None, ts=0.001):

        self.sess = sess
        self.mean_x_train, self.variance_x_train = moments(x, [0])

        #self.mean_x_ma, self.variance_x_ma = moments(self.x_splh, [0])

        self.mean_x_ma = tf.Variable(tf.zeros([size]))
        self.variance_x_ma = tf.Variable(tf.ones([size]))


        self.update = tf.tuple([self.variance_x_ma.assign(0.95*self.variance_x_ma+ 0.05*self.variance_x_train)] , control_inputs=[self.mean_x_ma.assign(0.95*self.mean_x_ma+ 0.05*self.mean_x_train)])[0]
        self.mean_x_ma_update = tf.tuple([self.mean_x_train] , control_inputs=[])[0]
        self.printUp = tf.Print(self.mean_x_ma_update, [selectTrain], message="selectTrain value : ")
        self.variance_x_ma_update = tf.tuple([self.variance_x_train], control_inputs=[])[0]

        def getxmau(): return self.mean_x_ma_update
        def getxma(): return self.mean_x_ma    

        def getvxmau(): return self.variance_x_ma_update
        def getvxma(): return self.variance_x_ma

        self.mean_x = tf.cond(selectTrain, getxmau, getxma)
        self.variance_x = tf.cond(selectTrain, getvxmau, getvxma)

        self.beta = tf.Variable(tf.zeros([size]))
        self.gamma = tf.Variable(tf.ones([size]))

        #tfs.tfs.session.run(tf.initialize_variables([self.beta, self.gamma]))#, self.mean_x_ma, self.variance_x_ma]))
        self.xNorm = tf.reshape(tf.nn.batch_norm_with_global_normalization(tf.reshape(x, [-1, 1, 1, size]), self.mean_x, self.variance_x, self.beta, self.gamma, 0.01, True), [-1, size])

        if toTarget!=None:
            self.isTracking = toTarget
            self.updateBeta = self.beta.assign(self.beta*(1-ts)+self.isTracking.beta*ts)
            self.updateGamma = self.gamma.assign(self.gamma*(1-ts)+self.isTracking.gamma*ts)
            self.updateTarget = tf.group(self.updateBeta, self.updateGamma)
Project: gail-driver    Author: sisl    | project source | file source
def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        global load_params
        if load_params:
            tf.get_default_session().run(tf.initialize_variables(self.get_params()))
            self.set_param_values(d["params"])
Project: ssd_tensorflow    Author: seann999    | project source | file source
def __init__(self, model_dir=None, gpu_fraction=0.7):
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.per_process_gpu_memory_fraction=gpu_fraction
        self.sess = tf.Session(config=config)
        self.imgs_ph, self.bn, self.output_tensors, self.pred_labels, self.pred_locs = model.model(self.sess)
        total_boxes = self.pred_labels.get_shape().as_list()[1]
        self.positives_ph, self.negatives_ph, self.true_labels_ph, self.true_locs_ph, self.total_loss, self.class_loss, self.loc_loss = \
            model.loss(self.pred_labels, self.pred_locs, total_boxes)
        out_shapes = [out.get_shape().as_list() for out in self.output_tensors]
        c.out_shapes = out_shapes
        c.defaults = model.default_boxes(out_shapes)

        # variables in model are already initialized, so only initialize those declared after
        with tf.variable_scope("optimizer"):
            self.global_step = tf.Variable(0)
            self.lr_ph = tf.placeholder(tf.float32, shape=[])

            self.optimizer = tf.train.AdamOptimizer(1e-3).minimize(self.total_loss, global_step=self.global_step)
        new_vars = tf.get_collection(tf.GraphKeys.VARIABLES, scope="optimizer")
        self.sess.run(tf.initialize_variables(new_vars))

        if model_dir is None:
            model_dir = FLAGS.model_dir

        ckpt = tf.train.get_checkpoint_state(model_dir)
        self.saver = tf.train.Saver()

        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("restored %s" % ckpt.model_checkpoint_path)
Project: thinstack-rl    Author: hans    | project source | file source
def test_basic_ff(self):
        self._make_stack(seq_length=5)

        X = np.array([
            [3, 1,  2],
            [3, 2,  4]
        ], dtype=np.int32).T

        transitions = np.array([
            [0, 0, 0, 1, 1],
            [0, 0, 1, 0, 1]
        ], dtype=np.float32)

        num_transitions = np.array([4, 4], dtype=np.int32)

        expected = np.array([[ 3.,  3.,  3.],
                             [ 3.,  3.,  3.],
                             [ 1.,  1.,  1.],
                             [ 2.,  2.,  2.],
                             [ 2.,  2.,  2.],
                             [ 5.,  5.,  5.],
                             [ 3.,  3.,  3.],
                             [ 4.,  4.,  4.],
                             [ 6.,  6.,  6.],
                             [ 9.,  9.,  9.]])

        # Run twice to make sure first state is properly erased
        with self.test_session() as s:
            s.run(tf.initialize_variables(tf.trainable_variables()))
            ts = self.stack

            feed = {ts.transitions[t]: transitions[:, t]
                    for t in range(self.seq_length)}
            feed[ts.buff] = X
            feed[ts.num_transitions] = num_transitions

            for _ in range(2):
                ts.reset(s)

                ret = s.run(ts.stack, feed)
                np.testing.assert_almost_equal(ret, expected)
Project: tf_img_tech    Author: david-berthelot    | project source | file source
def init(self):
        if not self.is_initialized:
            print('Initializing...')
            uninitialized_vars = []
            for var in tf.all_variables():
                try:
                    self.session.run(var)
                except tf.errors.FailedPreconditionError:
                    uninitialized_vars.append(var)
            tf.initialize_variables(uninitialized_vars).run()
            self.is_initialized = True
Project: crnn_tf    Author: liuhu-bigeye    | project source | file source
def assign_from_pkl(self, pkl_path):
    with open(pkl_path, 'rb') as f:
      load_variables = pickle.load(f)

    uninitialized_vars = []
    for i, variable in enumerate(tf.global_variables()):
      # Variables 0-41 map to the same pickle index; 42-77 are shifted
      # by +10; 78-117 by +20. Anything beyond 117 has no pretrained
      # weights and is initialized fresh below.
      if i<=41:
        idx = i
      elif i<=77:
        idx = i + 10
      elif i<=117:
        idx = i + 20
      else:
        uninitialized_vars.append(variable)
        continue

      variable_shape = load_variables[idx].shape
      if len(variable_shape) == 1:
        load_variable = load_variables[idx]
      elif len(variable_shape) == 4:
        load_variable = np.transpose(load_variables[idx], [3, 2, 1, 0])
      elif len(variable_shape) == 3:
        load_variable = np.transpose(load_variables[idx], [2, 1, 0])
      else:
        assert False

      print(variable.name, variable.get_shape(), load_variable.shape)
      variable.assign(load_variable).op.run()

    pdb.set_trace()
    tf.initialize_variables(uninitialized_vars).op.run()
    return
Project: Face-Recognition    Author: aswl01    | project source | file source
def restore_variables(sess, saver, pretrained_model):
    saver.restore(sess, pretrained_model)
    uninit_vars = []
    for var in tf.all_variables():
        try:
            sess.run(var)
        except tf.errors.FailedPreconditionError:
            uninit_vars.append(var)

    init_new_vars_op = tf.initialize_variables(uninit_vars)
    sess.run(init_new_vars_op)
Project: ternarynet    Author: czhu95    | project source | file source
def _setup_graph(self):
        self._init_mask_op = tf.initialize_variables(tf.get_collection('masks'))
        self._init_thre_op = tf.initialize_variables(tf.get_collection('thresholds'))