Python tensorflow module: set_random_seed() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.set_random_seed().
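
For orientation: in TensorFlow 1.x, tf.set_random_seed() sets a graph-level seed, and each random op derives its own seed from it; an explicit op-level seed, when given, is combined with the graph-level one. A minimal sketch, assuming TensorFlow 1.x (the tensor names are illustrative):

import tensorflow as tf

tf.reset_default_graph()
tf.set_random_seed(42)  # graph-level seed

a = tf.random_uniform([2])          # op seed derived from the graph-level seed
b = tf.random_uniform([2], seed=7)  # explicit op-level seed, combined with the graph-level seed

with tf.Session() as sess:
    print(sess.run([a, b]))  # the same values on every run of this script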

Project: onsager_deep_learning    Author: mborgerding
def show_shrinkage(shrink_func,theta,**kwargs):
    tf.reset_default_graph()
    tf.set_random_seed(kwargs.get('seed', 1))

    N = kwargs.get('N',500)
    L = kwargs.get('L',4)
    nsigmas = kwargs.get('sigmas',10)
    shape = (N,L)
    rvar = 1e-4
    r = np.reshape( np.linspace(0,nsigmas,N*L)*math.sqrt(rvar),shape)
    r_ = tfcf(r)  # tfcf: helper defined elsewhere in this project; wraps an array as a TF constant
    rvar_ = tfcf(np.ones(L)*rvar)

    xhat_, dxdr_ = shrink_func(r_, rvar_, tfcf(theta))

    with tf.Session() as sess:
        sess.run( tf.global_variables_initializer() )
        xhat = sess.run(xhat_)
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(r.reshape(-1),r.reshape(-1),'y')
    plt.plot(r.reshape(-1),xhat.reshape(-1),'b')
    if 'title' in kwargs:
        plt.suptitle(kwargs['title'])
    plt.show()
Project: shalo    Author: henryre
def _embed_sentences(self):
        """Embed sentences via the last output cell of an LSTM"""
        word_embeddings = self._get_embedding()
        word_feats      = tf.nn.embedding_lookup(word_embeddings, self.input)
        batch_size      = tf.shape(self.input)[0]
        with tf.variable_scope("LSTM") as scope:
            tf.set_random_seed(self.seed - 1)
            # LSTM architecture
            cell = tf.contrib.rnn.BasicLSTMCell(self.d)
            # Set RNN
            initial_state = cell.zero_state(batch_size, tf.float32)
            rnn_out, _ = tf.nn.dynamic_rnn(
                cell, word_feats, sequence_length=self.input_lengths,
                initial_state=initial_state, time_major=False               
            )
        # Get potentials
        return get_rnn_output(rnn_out, self.d, self.input_lengths), {}
Project: isbi2017-part3    Author: learningtitans
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Project: isbi2017-part3    Author: learningtitans
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Project: isbi2017-part3    Author: learningtitans
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Project: isbi2017-part3    Author: learningtitans
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
Project: third_person_im    Author: bstadie
def set_seed(seed):
    seed %= 4294967294
    global seed_
    seed_ = seed
    import lasagne
    random.seed(seed)
    np.random.seed(seed)
    lasagne.random.set_rng(np.random.RandomState(seed))
    try:
        import tensorflow as tf
        tf.set_random_seed(seed)
    except Exception as e:
        print(e)
    print((
        colorize(
            'using seed %s' % (str(seed)),
            'green'
        )
    ))
Project: HyperGAN    Author: 255BITS
def _sample(self):
        gan = self.gan
        z_t = gan.encoder.sample
        inputs_t = gan.inputs.x

        if self.z is None:
            self.z = gan.encoder.sample.eval()
            self.target = gan.encoder.sample.eval()
            self.input = gan.session.run(gan.inputs.x)

        if self.step > self.steps:
            self.z = self.target
            self.target = gan.encoder.sample.eval()
            self.step = 0

        percent = float(self.step)/self.steps
        z_interp = self.z*(1.0-percent) + self.target*percent
        self.step += 1

        g=tf.get_default_graph()
        with g.as_default():
            tf.set_random_seed(1)
            return {
                'generator': gan.session.run(gan.generator.sample, feed_dict={z_t: z_interp, inputs_t: self.input})
            }
Project: gait-recognition    Author: marian-margeta
def __init__(self, name = None, num_of_persons = 0, recurrent_unit = 'GRU', rnn_layers = 1,
                 reuse = False, is_training = False, input_net = None):
        tf.set_random_seed(SEED)

        if num_of_persons <= 0 and is_training:
            raise Exception('Parameter num_of_persons has to be greater than zero when training')

        self.num_of_persons = num_of_persons
        self.rnn_layers = rnn_layers
        self.recurrent_unit = recurrent_unit

        if input_net is None:
            input_tensor = tf.placeholder(
                dtype = tf.float32,
                shape = (None, 17, 17, 32),
                name = 'input_image')
        else:
            input_tensor = input_net

        super().__init__(name, input_tensor, self.FEATURES, num_of_persons, reuse, is_training)
Project: sonnet    Author: deepmind
def testComputation(self):
    tf.set_random_seed(0)
    with self.test_session() as sess:
      initializer = snt.nets.noisy_identity_kernel_initializer(2, stddev=1e-20)
      x = initializer([3, 3, 4, 8])
      x = tf.reduce_sum(x, axis=[3])
      x_ = sess.run(x)

      # Iterate over elements. After summing over depth, assert that only the
      # middle pixel is on.
      it = np.nditer(x_, flags=["multi_index"])
      while not it.finished:
        value, idx = it[0], it.multi_index
        (filter_height, filter_width, _) = idx
        if filter_height == 1 and filter_width == 1:
          self.assertAllClose(value, 1)
        else:
          self.assertAllClose(value, 0)
        it.iternext()
Project: rllabplusplus    Author: shaneshixiang
def set_seed(seed):
    seed %= 4294967294
    global seed_
    seed_ = seed
    random.seed(seed)
    np.random.seed(seed)
    try:
        import lasagne
        lasagne.random.set_rng(np.random.RandomState(seed))
    except Exception as e:
        print(e)
    try:
        import tensorflow as tf
        tf.set_random_seed(seed)
    except Exception as e:
        print(e)
    print((
        colorize(
            'using seed %s' % (str(seed)),
            'green'
        )
    ))
Project: terngrad    Author: wenwei202
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Project: terngrad    Author: wenwei202
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Project: terngrad    Author: wenwei202
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Project: terngrad    Author: wenwei202
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
Project: LiTeFlow    Author: petrux
def test_time(self):
        """Test that a `time` over the `length` triggers a finished flag."""
        tf.set_random_seed(23)
        time = tf.convert_to_tensor(5, dtype=tf.int32)
        lengths = tf.constant([4, 5, 6, 7])
        output = tf.random_normal([4, 10, 3], dtype=tf.float32)
        finished = layers.TerminationHelper(lengths).finished(time, output)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            act_finished = sess.run(finished)

        # NOTA BENE: we have set that
        # time = 5
        # lengths = [4, 5, 6, 7]
        #
        # Since `time` is 0-based, time=5 means the sixth element is being
        # emitted, so sequences of length <= 6 are finished and only the
        # last one in the batch (length 7) is still ongoing.
        exp_finished = [True, True, True, False]
        self.assertAllEqual(exp_finished, act_finished)
Project: YOLO2TensorFlow    Author: PaulChongPeng
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Project: YOLO2TensorFlow    Author: PaulChongPeng
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Project: YOLO2TensorFlow    Author: PaulChongPeng
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Project: YOLO2TensorFlow    Author: PaulChongPeng
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
Project: RFHO    Author: lucfra
def _check_adam():
    for _mode in HO_MODES[:2]:
        for _model in IMPLEMENTED_MODEL_TYPES[1:2]:
            _model_kwargs = {'dims': [None, 300, 300, None]}
            tf.reset_default_graph()

            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)

            experiment('test_with_model_' + _model,
                       collect_data=False, hyper_iterations=3, mode=_mode, epochs=3,
                       optimizer=rf.AdamOptimizer,
                       optimizer_kwargs={'lr': tf.Variable(.001, name='eta_adam')},
                       model=_model,
                       model_kwargs=_model_kwargs,
                       set_T=100,
                       )
Project: RFHO    Author: lucfra
def _check_forward():
    w_100 = []
    for i in range(1):
        for _mode in HO_MODES[0:1]:
            for _model in IMPLEMENTED_MODEL_TYPES[0:2]:
                _model_kwargs = {}  # {'dims': [None, 300, 300, None]}
                tf.reset_default_graph()
                # set random seeds!!!!
                np.random.seed(1)
                tf.set_random_seed(1)

                results = experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=10, mode=_mode,
                                     epochs=None,
                                     model=_model,
                                     model_kwargs=_model_kwargs,
                                     set_T=1000,
                                     synthetic_hypers=None,
                                     hyper_batch_size=100
                                     # optimizer=rf.GradientDescentOptimizer,
                                     # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                                     )
                w_100.append(results[0]['weights'])
    # rf.save_obj(w_100, 'check_forward')
    return w_100
Project: RFHO    Author: lucfra
def _check_all_methods():
    for _mode in HO_MODES[:]:
        for _model in IMPLEMENTED_MODEL_TYPES:
            # _model_kwargs = {'dims': [None, 300, 300, None]}
            tf.reset_default_graph()
            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)

            experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=3, mode=_mode,
                       # epochs=3,
                       model=_model,
                       # model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
Project: RFHO    Author: lucfra
def _check_cnn():
    print('END')
    for _mode in HO_MODES[2:3]:
        for _model in IMPLEMENTED_MODEL_TYPES[2:3]:
            tf.reset_default_graph()
            np.random.seed(1)
            tf.set_random_seed(1)

            _model_kwargs = {'conv_dims': [[5, 5, 1, 2], [5, 5, 2, 4], [5, 5, 4, 8]],
                             'ffnn_dims': [128, 10]}

            # noinspection PyTypeChecker
            experiment('test_with_model_' + _model, collect_data=False, hyper_iterations=3, mode=_mode,
                       epochs=2,
                       model=_model,
                       model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100,
                       l1=None,
                       l2=None
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
Project: RC-experiments    Author: cairoHy
def __init__(self):
        self.model_name = self.__class__.__name__
        self.sess = tf.Session()
        # get arguments
        self.args = self.get_args()

        # logging setup
        logging.basicConfig(filename=self.args.log_file,
                            level=logging.DEBUG,
                            format='%(asctime)s %(message)s', datefmt='%y-%m-%d %H:%M')

        # set random seed
        np.random.seed(self.args.random_seed)
        tf.set_random_seed(self.args.random_seed)

        # save arguments
        save_args(args=self.args)
Project: FeatureSqueezing    Author: QData
def load_tf_session():
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'th':
        keras.backend.set_image_dim_ordering('th')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to 'tf', temporarily setting to 'th'")

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)
    print("Created TensorFlow session and set Keras backend.")
    return sess


# Get MNIST test data
Project: tensorflow_yolo2    Author: wenxichen
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Project: tensorflow_yolo2    Author: wenxichen
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Project: tensorflow_yolo2    Author: wenxichen
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Project: tensorflow_yolo2    Author: wenxichen
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
Project: tefla    Author: openAGI
def test_statistics(self):
        """Check that `_statistics` gives the same result as `nn.moments`."""
        tf.set_random_seed(1234)

        tensors = tf.random_normal([4, 5, 7, 3])
        for axes in [(3), (0, 2), (1, 2, 3)]:
            vb_mean, mean_sq = virtual_batchnorm._statistics(tensors, axes)
            mom_mean, mom_var = tf.nn.moments(tensors, axes)
            vb_var = mean_sq - tf.square(vb_mean)

            with self.test_session(use_gpu=True) as sess:
                vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
                    vb_mean, vb_var, mom_mean, mom_var])

            self.assertAllClose(mom_mean_np, vb_mean_np)
            self.assertAllClose(mom_var_np, vb_var_np)
Project: tefla    Author: openAGI
def test_reference_batch_normalization(self):
        """Check that batch norm from VBN agrees with opensource implementation."""
        tf.set_random_seed(1234)

        batch = tf.random_normal([6, 5, 7, 3, 3])

        for axis in range(5):
            # Get `layers` batchnorm result.
            bn_normalized = tf.layers.batch_normalization(
                batch, axis, training=True)

            # Get VBN's batch normalization on reference batch.
            batch_axis = 0 if axis != 0 else 1  # axis and batch_axis can't be the same
            vbn = virtual_batchnorm.VBN(batch, axis, batch_axis=batch_axis)
            vbn_normalized = vbn.reference_batch_normalization()

            with self.test_session(use_gpu=True) as sess:
                tf.global_variables_initializer().run()

                bn_normalized_np, vbn_normalized_np = sess.run(
                    [bn_normalized, vbn_normalized])
            self.assertAllClose(bn_normalized_np, vbn_normalized_np)
Project: tefla    Author: openAGI
def test_same_as_batchnorm(self):
        """Check that batch norm on set X is the same as ref of X / y on `y`."""
        tf.set_random_seed(1234)

        num_examples = 4
        examples = [tf.random_normal([5, 7, 3]) for _ in
                    range(num_examples)]

        # Get the result of the opensource batch normalization.
        batch_normalized = tf.layers.batch_normalization(
            tf.stack(examples), training=True)

        for i in range(num_examples):
            examples_except_i = tf.stack(examples[:i] + examples[i + 1:])
            # Get the result of VBN's batch normalization.
            vbn = virtual_batchnorm.VBN(examples_except_i)
            vb_normed = tf.squeeze(
                vbn(tf.expand_dims(examples[i], [0])), [0])

            with self.test_session(use_gpu=True) as sess:
                tf.global_variables_initializer().run()
                bn_np, vb_np = sess.run([batch_normalized, vb_normed])
            self.assertAllClose(bn_np[i, ...], vb_np)
Project: deep-makeover    Author: david-gpu
def _setup_tensorflow():
    # Create session
    config = tf.ConfigProto(log_device_placement=False) #, intra_op_parallelism_threads=1)
    sess   = tf.Session(config=config)

    # Initialize all RNGs with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    return sess


# TBD: Move to dm_train.py?
Project: deep_portfolio    Author: deependersingla
def main(_):
    with tf.Session() as sess:
        env = EquityEnvironment(assets, look_back, episode_length, look_back_reinforcement, price_series, train=True)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)

        state_dim = num_inputs
        action_dim = num_actions
        action_bound = num_action_bound
        # Ensure action bound is symmetric
        # assert (env.action_space.high == -env.action_space.low)

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound, \
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim, \
                               CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars())

        actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))

        train(sess, env, actor, critic, actor_noise)
Project: lsdc    Author: febert
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None, global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, global_pool=False)
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)
Project: lsdc    Author: febert
def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with slim.arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(inputs, None, global_pool=False,
                                           output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(inputs, None, global_pool=False)
            sess.run(tf.initialize_all_variables())
            self.assertAllClose(output.eval(), expected.eval(),
                                atol=1e-4, rtol=1e-4)
Project: lsdc    Author: febert
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(
          total_loss, optimizer)

      loss = slim.learning.train(
          train_op, self._logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertLess(loss, .1)
Project: lsdc    Author: febert
def testTrainWithNonDefaultGraph(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs8/')
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

    loss = slim.learning.train(
        train_op, self._logdir, number_of_steps=300, log_every_n_steps=10,
        graph=g)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
Project: lsdc    Author: febert
def testTrainWithNoneAsLogdir(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      loss = slim.learning.train(
          train_op, None, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
Project: lsdc    Author: febert
def testTrainWithSessionConfig(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      session_config = tf.ConfigProto(allow_soft_placement=True)
      loss = slim.learning.train(
          train_op,
          None,
          number_of_steps=300,
          log_every_n_steps=10,
          session_config=session_config)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
Project: lsdc    Author: febert
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        slim.learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10)
Project: lsdc    Author: febert
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/')
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)
      saver = tf.train.Saver()

      with self.assertRaises(ValueError):
        slim.learning.train(
            train_op, None, init_op=None, number_of_steps=300, saver=saver)
Project: lsdc    Author: febert
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    self._logdir = os.path.join(self.get_temp_dir(), 'tmp_logs_/')
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(
          total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        slim.learning.train(
            train_op, self._logdir, init_op=None, number_of_steps=300)
Project: lsdc    Author: febert
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with tf.Graph().as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      slim.losses.log_loss(tf_predictions, tf_labels)
      total_loss = slim.losses.get_total_loss()

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)

      train_op = slim.learning.create_train_op(total_loss, optimizer)

      loss = slim.learning.train(
          train_op, self._logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def run():
    if len(sys.argv) < 3:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)
    test_data = load_data(sys.argv[2], config.dictionary, config.grammar, config.max_length)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        with tf.device('/cpu:0'):
            model.build()

            test_eval = Seq2SeqEvaluator(model, config.grammar, test_data, 'test', config.reverse_dictionary, beam_size=config.beam_size, batch_size=config.batch_size)
            loader = tf.train.Saver()

            with tf.Session() as sess:
                loader.restore(sess, os.path.join(model_dir, 'best'))

                #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
                #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

                test_eval.eval(sess, save_to_file=True)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def run():
    if len(sys.argv) < 4:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Everything Set>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    everything_labels, everything_label_lengths = load_programs(config, sys.argv[2])
    test_labels, test_label_lengths = load_programs(config, sys.argv[3])
    #test_labels, test_label_lengths = sample(config.grammar, test_labels, test_label_lengths)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        model.build()
        loader = tf.train.Saver()

        train_bag_of_tokens = bag_of_tokens(config, everything_labels, everything_label_lengths)
        V, mean = pca_fit(train_bag_of_tokens, n_components=2)

        eval_bag_of_tokens = bag_of_tokens(config, test_labels, test_label_lengths)
        transformed = pca_transform(eval_bag_of_tokens, V, mean)

        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))
            transformed = transformed.eval(session=sess)

        programs = reconstruct_programs(test_labels, test_label_lengths, config.grammar.tokens)
        show_pca(transformed, programs)
Project: youtube-8m    Author: wangheda
def check_video_id():
  tf.set_random_seed(0)  # for reproducibility
  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    # prepare a reader for each single model prediction result
    all_readers = []

    all_patterns = FLAGS.eval_data_patterns
    all_patterns = [x.strip() for x in all_patterns.strip().strip(",").split(",")]
    for _ in range(len(all_patterns)):
      reader = readers.EnsembleReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    if FLAGS.input_data_pattern is not None:
      input_reader = readers.EnsembleReader(
          feature_names=["mean_rgb","mean_audio"], feature_sizes=[1024,128])
      input_data_pattern = FLAGS.input_data_pattern

    if FLAGS.eval_data_patterns == "":
      raise IOError("'eval_data_patterns' was not specified. " +
                     "Nothing to evaluate.")

    build_graph(
        all_readers=all_readers,
        input_reader=input_reader,
        input_data_pattern=input_data_pattern,
        all_eval_data_patterns=all_patterns,
        batch_size=FLAGS.batch_size)

    logging.info("built evaluation graph")
    video_id_equal = tf.get_collection("video_id_equal")[0]
    input_distance = tf.get_collection("input_distance")[0]

    check_loop(video_id_equal, input_distance, all_patterns)
Project: distributional_perspective_on_RL    Author: Kiwoo
def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)
Project: bnn-analysis    Author: myshkov
def __init__(self):
        """ Creates a new Env object. """
        # set seeds
        self.seed = 2305
        np.random.seed(self.seed)
        tf.set_random_seed(self.seed)

        # test case
        self.env_name = None  # name of the environment
        self.model_name = None  # name of the model
        self.test_case_name = 'test'  # name of the test
        self.baseline_test_case_name = None  # name of the test containing 'true' posterior
        self.data_dir = None

        # data
        self.input_dim = None  # number of features
        self.output_dim = None
        self.data_size = None  # number of rows

        self.n_splits = 10
        self.current_split = 0
        self.train_x = list()
        self.train_y = list()
        self.test_x = list()
        self.test_y = list()

        # common model/sampler parameters
        self.layers_description = None
        self.model_parameters_size = None
        self.batch_size = 10
        self.chains_num = 1  # number of models to run in parallel; parameters are per chain
        self.n_chunks = 100  # samples are drawn and stored in chunks
        self.n_samples = 100  # samples per chunk
        self.thinning = 0  # number of samples to discard

        self.sampler = None  # sampler created for current split
        self.sampler_factory = None

        # other
        self._log_handler = None