Python tensorflow module: Graph() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.Graph().

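Before the project examples, here is a minimal sketch of the pattern they all share (assuming TensorFlow 1.x, which the examples below target; the tensor names and shapes are illustrative only): build ops inside an explicit tf.Graph(), then run them in a tf.Session bound to that graph.

import tensorflow as tf

# Build ops inside an explicit graph instead of the process-wide default graph.
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
    w = tf.get_variable('w', shape=[3, 1],
                        initializer=tf.random_normal_initializer())
    y = tf.matmul(x, w)
    init_op = tf.global_variables_initializer()

# The session must be bound to the same graph to run its ops.
with tf.Session(graph=graph) as sess:
    sess.run(init_op)
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))

Because each tf.Graph() isolates its ops and variables, wrapping every test case or model in its own "with tf.Graph().as_default():" block (as most of the examples below do) prevents name collisions and stale state between runs.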
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def load_language(app, tokenizer_service, tag, model_dir):
    config = Config.load(['./default.conf', './default.' + tag + '.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        # Force everything to run on the CPU; we run on single inputs,
        # so there is not much point in going through the GPU
        with tf.device('/cpu:0'):
            model.build()
            loader = tf.train.Saver()

        with session.as_default():
            loader.restore(session, os.path.join(model_dir, 'best'))
    tokenizer = Tokenizer(tokenizer_service, tag)
    app.add_language(tag, LanguageContext(tag, tokenizer, session, config, model))
    print('Loaded language ' + tag)
Project: tensorflow_qrnn | Author: icoxfog417
def test_qrnn_linear_forward(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_linear:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
Project: tensorflow_qrnn | Author: icoxfog417
def test_qrnn_with_previous(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_with_previous:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
Project: cxflow-tensorflow | Author: Cognexa
def test_create_optimizer(self):
        """Test if create optimizer does work with tf optimizers."""

        optimizer_config = {'learning_rate': 0.1}

        # test missing required entry `class`
        self.assertRaises(AssertionError, create_optimizer, optimizer_config)

        optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'

        with tf.Session().as_default():
            # test if the optimizer is created correctly
            optimizer = create_optimizer(optimizer_config)
            self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

            # test if learning_rate variable is created with the correct value
            lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
            tf.get_default_session().run(tf.global_variables_initializer())
            self.assertAlmostEqual(lr_tensor.eval(), 0.1)

        optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

        # test missing required argument (momentum in this case)
        with tf.Graph().as_default():
            self.assertRaises(TypeError, create_optimizer, optimizer_config2)
Project: cxflow-tensorflow | Author: Cognexa
def save(self, name_suffix: str) -> str:
        """
        Save current tensorflow graph to a checkpoint named with the given name suffix.

        The checkpoint will be located in the self.log_dir directory.
        :param name_suffix: saved checkpoint name suffix
        :return: path to the saved checkpoint
        """
        graph_path = path.join(self._log_dir, 'model_{}.graph'.format(name_suffix))
        checkpoint_path = path.join(self._log_dir, 'model_{}.ckpt'.format(name_suffix))
        frozen_graph_path = path.join(self._log_dir, 'model_{}.pb'.format(name_suffix))

        tf.train.write_graph(self._session.graph_def, '', graph_path, as_text=False)

        self._saver.save(self._session, checkpoint_path)

        if self._freeze_graph:
            with tf.Graph().as_default():
                freeze_graph(input_graph=graph_path,
                             input_checkpoint=checkpoint_path,
                             output_node_names=self.output_names,
                             output_graph=frozen_graph_path)

        return checkpoint_path
Project: convolutional-pose-machines-tensorflow | Author: timctho
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph
Project: DmsMsgRcg | Author: bshao001
def s1_predict(config_file, model_dir, model_file, predict_file_list, out_dir):
    """
    This function serves as a test/validation tool during model development. It is
    not used as part of the final production pipeline.
    """
    with open(config_file) as config_buffer:
        config = json.loads(config_buffer.read())

    with tf.Graph().as_default() as graph:
        converted_model = ConvertedModel(config, graph, 's1_keras', model_dir, model_file)

    with tf.Session(graph=graph) as sess:
        for img_file in predict_file_list:
            image = cv2.imread(img_file)
            boxes = converted_model.predict(sess, image)
            image = draw_boxes(image, boxes)

            _, filename = os.path.split(img_file)
            cv2.imwrite(os.path.join(out_dir, filename), image)
Project: tf_classification | Author: visipedia
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_0',
                 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
                 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
                 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
                 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
                 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
                 'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
                 'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
                 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
                 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
                 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
                 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'MobilenetV1/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points)
Project: tf_classification | Author: visipedia
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 299, 299
    endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
                 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
                 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']

    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v3_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV3/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points)
Project: tf_classification | Author: visipedia
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
                 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
                 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
                 'Mixed_5c']
    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v1_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV1/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points)
Project: fold | Author: tensorflow
def run(self):
    """Build a graph and run the model on random input data."""
    _logger.info("Creating graph.")
    with tf.Graph().as_default():
      _logger.info("Building model.")
      self.build_model_loss()

      _logger.info("Starting session.")
      config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
      with tf.Session(config=config) as sess:
        _logger.info("Initializing variables.")
        sess.run(tf.global_variables_initializer())
        _logger.info("Starting timing test.")
        self.evaluate(sess)

      _logger.info("Ending session.")
Project: tianchi_power | Author: lvniqi
def __init__(self, day, learning_rate=1e-2):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.x_predict = tf.placeholder("float", [None,_feature_length])
            self.y_ = tf.placeholder("float", [None,1])
            #layer fc 1
            w_1 = tf.get_variable('all/w_1', [_feature_length,],
                                      initializer=tf.random_normal_initializer())
            #zoom layer
            w_zoom = tf.get_variable('all/w_zoom', [1,],
                                      initializer=tf.random_normal_initializer())
            #0.8~1.2
            self.zoom = tf.nn.sigmoid(w_zoom)*0.4+0.8
            self.percent = tf.nn.softmax(w_1)*self.zoom
            self.y_p = tf.reduce_sum(self.x_predict*self.percent,1)
            self.y_p = tf.reshape(self.y_p,[-1,1])
            self.error_rate = tf.reduce_mean(tf.abs(self.y_-self.y_p)/self.y_)
            self.mse = tf.reduce_mean(tf.abs(self.y_-self.y_p))
            #self.mse = self.error_rate
            self.optimizer = tf.train.AdamOptimizer(learning_rate)
            self.train_step = self.optimizer.minimize(self.mse)
            self.sess = tf.Session(graph = self.graph)
            self.sess.run(tf.global_variables_initializer())
Project: tensorflow-action-conditional-video-prediction | Author: williamd4112
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: cloudml-samples | Author: GoogleCloudPlatform
def export(self, last_checkpoint, output_dir):
    """Builds a prediction graph and xports the model.

    Args:
      last_checkpoint: The latest checkpoint from training.
      output_dir: Path to the folder to be used to output the model.
    """
    logging.info('Exporting prediction graph to %s', output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
      # Build and save prediction meta graph and trained variable values.
      self.build_prediction_graph()
      # Remove this try/except once TensorFlow 0.12 is standard.
      try:
        init_op = tf.global_variables_initializer()
      except AttributeError:
        init_op = tf.initialize_all_variables()
      sess.run(init_op)
      trained_saver = tf.train.Saver()
      trained_saver.restore(sess, last_checkpoint)
      saver = tf.train.Saver()
      saver.export_meta_graph(filename=os.path.join(output_dir, 'export.meta'))
      saver.save(
          sess, os.path.join(output_dir, 'export'), write_meta_graph=False)
Project: tf-cnn-lstm-ocr-captcha | Author: Luonic
def evaluate():
    """Eval ocr for a number of steps."""
    with tf.Graph().as_default() as g:
        images, labels, seq_lengths = ocr.inputs()
        logits, timesteps = ocr.inference(images, FLAGS.eval_batch_size, train=True)
        ler = ocr.create_label_error_rate(logits, labels, timesteps)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        config = tf.ConfigProto(
            device_count={'GPU': 0}
        )
        sess = tf.Session(config=config)
        sess.run(init_op)

        saver = tf.train.Saver()

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, ler, summary_op)
            if FLAGS.run_once:
                break
            # print("Waiting for next evaluation for " + str(FLAGS.eval_interval_secs) + " sec")
            time.sleep(FLAGS.eval_interval_secs)
Project: rl-attack-detection | Author: yenchenlin
def main(args):
    with tf.Graph().as_default() as graph:
        # Create dataset
        logging.info('Create data flow from %s' % args.data)
        caffe_dataset = CaffeDataset(dir=args.data, num_act=args.num_act, mean_path=args.mean)

        # Config session
        config = get_config(args)

        x = tf.placeholder(dtype=tf.float32, shape=[None, 84, 84, 12])
        op = load_caffe_model(x, args.load)

        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

        # Start session
        with tf.Session(config=config) as sess:
            sess.run(init)
            i = 0
            for s, a in caffe_dataset(5):
                pred_data = sess.run([op], feed_dict={x: [s]})[0]
                print(pred_data.shape)
                np.save('tf-%03d.npy' % i, pred_data)
                i += 1
Project: isbi2017-part3 | Author: learningtitans
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, [])
Project: isbi2017-part3 | Author: learningtitans
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, '')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)
Project: isbi2017-part3 | Author: learningtitans
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device)
Project: isbi2017-part3 | Author: learningtitans
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, '')
        self.assertDeviceEqual(v.device, 'CPU:0')
Project: isbi2017-part3 | Author: learningtitans
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = tf.contrib.layers.l2_regularizer(0.001)
        tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)

      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device)
Project: isbi2017-part3 | Author: learningtitans
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 299, 299
    endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
                 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3',
                 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
                 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
                 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c']

    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v3_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV3/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points)
Project: isbi2017-part3 | Author: learningtitans
def testBuildOnlyUpToFinalEndpoint(self):
    batch_size = 5
    height, width = 299, 299
    all_endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
        'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
        'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
    for index, endpoint in enumerate(all_endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v4_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV4/' + endpoint))
        self.assertItemsEqual(all_endpoints[:index+1], end_points)
Project: isbi2017-part3 | Author: learningtitans
def testBuildOnlyUptoFinalEndpoint(self):
    batch_size = 5
    height, width = 224, 224
    endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
                 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
                 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
                 'Mixed_5c']
    for index, endpoint in enumerate(endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v1_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV1/' + endpoint))
        self.assertItemsEqual(endpoints[:index+1], end_points)
Project: generating_sequences | Author: PFCM
def test_get_model(self):
        """Just make sure we can get a model without errors"""
        # TODO(pfcm) nice helpers for setting up/tearing down a graph & sess
        with tf.Graph().as_default():
            inputs = tf.placeholder(tf.float32, [50, 30, 10])
            cell = tf.nn.rnn_cell.BasicRNNCell(32)

            istate, logits, fstate = ns.standard_nextstep_inference(
                cell, inputs, 5)

            # check shapes are as expected
            self.assertEqual(istate[0].get_shape().as_list(),
                             [30, 32])
            self.assertEqual(len(logits), 50)
            self.assertEqual(logits[0].get_shape().as_list(),
                             [30, 5])
            self.assertEqual(istate[0].get_shape().as_list(),
                             fstate[0].get_shape().as_list())
Project: magenta | Author: tensorflow
def _multiple_images(input_image, which_styles, output_dir):
  """Stylizes an image into a set of styles and writes them to disk."""
  with tf.Graph().as_default(), tf.Session() as sess:
    stylized_images = model.transform(
        tf.concat([input_image for _ in range(len(which_styles))], 0),
        normalizer_params={
            'labels': tf.constant(which_styles),
            'num_categories': FLAGS.num_styles,
            'center': True,
            'scale': True})
    _load_checkpoint(sess, FLAGS.checkpoint)

    stylized_images = stylized_images.eval()
    for which, stylized_image in zip(which_styles, stylized_images):
      image_utils.save_np_image(
          stylized_image[None, ...],
          '{}/{}_{}.png'.format(output_dir, FLAGS.output_basename, which))
Project: magenta | Author: tensorflow
def _multiple_styles(input_image, which_styles, output_dir):
  """Stylizes image into a linear combination of styles and writes to disk."""
  with tf.Graph().as_default(), tf.Session() as sess:
    mixture = _style_mixture(which_styles, FLAGS.num_styles)
    stylized_images = model.transform(
        input_image,
        normalizer_fn=ops.weighted_instance_norm,
        normalizer_params={
            'weights': tf.constant(mixture),
            'num_categories': FLAGS.num_styles,
            'center': True,
            'scale': True})
    _load_checkpoint(sess, FLAGS.checkpoint)

    stylized_image = stylized_images.eval()
    image_utils.save_np_image(
        stylized_image,
        os.path.join(output_dir, '%s_%s.png' % (
            FLAGS.output_basename, _describe_style(which_styles))))
Project: magenta | Author: tensorflow
def test(checkpoint_path, test_dir, examples_path, hparams,
         num_batches=None):
  """Evaluate the model at a single checkpoint."""
  tf.gfile.MakeDirs(test_dir)

  _trial_summary(hparams, examples_path, test_dir)
  with tf.Graph().as_default():
    transcription_data = _get_data(
        examples_path, hparams, is_training=False)
    unused_loss, losses, labels, predictions, images = model.get_model(
        transcription_data, hparams, is_training=False)

    metrics_to_values, metrics_to_updates = _get_eval_metrics(
        losses, labels, predictions, images, hparams)

    metric_values = slim.evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        logdir=test_dir,
        num_evals=num_batches or transcription_data.num_batches,
        eval_op=metrics_to_updates.values(),
        final_op=metrics_to_values.values())

    metrics_to_values = dict(zip(metrics_to_values.keys(), metric_values))
    for metric in metrics_to_values:
      print('%s: %f' % (metric, metrics_to_values[metric]))
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def run():
    if len(sys.argv) < 3:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)
    test_data = load_data(sys.argv[2], config.dictionary, config.grammar, config.max_length)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        with tf.device('/cpu:0'):
            model.build()

            test_eval = Seq2SeqEvaluator(model, config.grammar, test_data, 'test', config.reverse_dictionary, beam_size=config.beam_size, batch_size=config.batch_size)
            loader = tf.train.Saver()

            with tf.Session() as sess:
                loader.restore(sess, os.path.join(model_dir, 'best'))

                #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
                #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

                test_eval.eval(sess, save_to_file=True)
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def run():
    if len(sys.argv) < 4:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Everything Set>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    everything_labels, everything_label_lengths = load_programs(config, sys.argv[2])
    test_labels, test_label_lengths = load_programs(config, sys.argv[3])
    #test_labels, test_label_lengths = sample(config.grammar, test_labels, test_label_lengths)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        model.build()
        loader = tf.train.Saver()

        train_bag_of_tokens = bag_of_tokens(config, everything_labels, everything_label_lengths)
        V, mean = pca_fit(train_bag_of_tokens, n_components=2)

        eval_bag_of_tokens = bag_of_tokens(config, test_labels, test_label_lengths)
        transformed = pca_transform(eval_bag_of_tokens, V, mean)

        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))
            transformed = transformed.eval(session=sess)

        programs = reconstruct_programs(test_labels, test_label_lengths, config.grammar.tokens)
        show_pca(transformed, programs)
Project: youtube-8m | Author: wangheda
def check_video_id():
  tf.set_random_seed(0)  # for reproducibility
  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        FLAGS.feature_names, FLAGS.feature_sizes)

    # prepare a reader for each single model prediction result
    all_readers = []

    all_patterns = FLAGS.eval_data_patterns
    all_patterns = map(lambda x: x.strip(), all_patterns.strip().strip(",").split(","))
    for i in xrange(len(all_patterns)):
      reader = readers.EnsembleReader(
          feature_names=feature_names, feature_sizes=feature_sizes)
      all_readers.append(reader)

    input_reader = None
    input_data_pattern = None
    if FLAGS.input_data_pattern is not None:
      input_reader = readers.EnsembleReader(
          feature_names=["mean_rgb","mean_audio"], feature_sizes=[1024,128])
      input_data_pattern = FLAGS.input_data_pattern

    if FLAGS.eval_data_patterns == "":
      raise IOError("'eval_data_patterns' was not specified. " +
                    "Nothing to evaluate.")

    build_graph(
        all_readers=all_readers,
        input_reader=input_reader,
        input_data_pattern=input_data_pattern,
        all_eval_data_patterns=all_patterns,
        batch_size=FLAGS.batch_size)

    logging.info("built evaluation graph")
    video_id_equal = tf.get_collection("video_id_equal")[0]
    input_distance = tf.get_collection("input_distance")[0]

    check_loop(video_id_equal, input_distance, all_patterns)
Project: human-rl | Author: gsastry
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
Project: human-rl | Author: gsastry
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
Project: human-rl | Author: gsastry
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
Project: human-rl | Author: gsastry
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
Project: human-rl | Author: gsastry
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0]
Project: tensorflow_qrnn | Author: icoxfog417
def test_qrnn(self):
        print("QRNN Working check")
        with tf.Graph().as_default() as qrnn:
            self.check_by_digits(qrnn, qrnn=5)
Project: tensorflow_qrnn | Author: icoxfog417
def test_baseline(self):
        print("Baseline(LSTM) Working check")
        with tf.Graph().as_default() as baseline:
            self.check_by_digits(baseline, baseline=True)
Project: tensorflow_qrnn | Author: icoxfog417
def test_random(self):
        print("Random Working check")
        with tf.Graph().as_default() as random:
            self.check_by_digits(random, random=True)
Project: seq2seq | Author: google
def load_metadata(model_dir):
  """Loads RunMetadata, Graph and OpLog from files
  """
  # Import RunMetadata
  run_meta_path = os.path.join(model_dir, "metadata/run_meta")
  run_meta = tf.RunMetadata()
  if gfile.Exists(run_meta_path):
    with gfile.GFile(run_meta_path, "rb") as file:
      run_meta.MergeFromString(file.read())
    print("Loaded RunMetadata from {}".format(run_meta_path))
  else:
    print("RunMetadata does not exist a {}. Skipping.".format(run_meta_path))

  # Import Graph
  graph_def_path = os.path.join(model_dir, "graph.pbtxt")
  graph = tf.Graph()
  if gfile.Exists(graph_def_path):
    with graph.as_default():
      _register_function_ops(CUSTOM_OP_FUNCTIONS)
      graph_def = tf.GraphDef()
      with gfile.GFile(graph_def_path, "rb") as file:
        text_format.Parse(file.read(), graph_def)
      tf.import_graph_def(graph_def, name="")
      print("Loaded Graph from {}".format(graph_def_path))
  else:
    print("Graph does not exist a {}. Skipping.".format(graph_def_path))

  # Import OpLog
  op_log_path = os.path.join(model_dir, "metadata/tfprof_log")
  op_log = tfprof_log_pb2.OpLog()
  if gfile.Exists(op_log_path):
    with gfile.GFile(op_log_path, "rb") as file:
      op_log.MergeFromString(file.read())
      print("Loaded OpLog from {}".format(op_log_path))
  else:
    print("OpLog does not exist a {}. Skipping.".format(op_log_path))

  return run_meta, graph, op_log
Project: ml | Author: hohoins
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Project: dcan-tensorflow | Author: lisjin
def evaluate():
    """Eval BBBC006 for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for BBBC006.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = bbbc006.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        c_fuse, s_fuse = bbbc006.inference(images, train=False)

        dice_op = bbbc006.dice_op(c_fuse, s_fuse, labels)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            bbbc006.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, dice_op, summary_writer, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Project: dcan-tensorflow | Author: lisjin
def tower_loss(scope, images, labels):
    """Calculate the total loss on a single tower running the BBBC006 model.
    Args:
      scope: unique prefix string identifying the BBBC006 tower, e.g. 'tower_0'
      images: Images. 4D tensor of shape [batch_size, height, width, 3].
      labels: Labels. 1D tensor of shape [batch_size].

    Returns:
       Tensor of shape [] containing the total loss for a batch of data
    """

    # Build inference Graph.
    c_fuse, s_fuse = bbbc006.inference(images)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = bbbc006.loss(c_fuse, s_fuse, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % bbbc006.TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)

    return total_loss
Project: han | Author: croath
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def
        )
    return graph
Project: benchmarks | Author: tensorflow
def run(self):
    """Run the benchmark task assigned to this process.

    Returns:
      Dictionary of statistics for training or eval.
    Raises:
       ValueError: unrecognized job name.
    """
    if self.params.job_name == 'ps':
      log_fn('Running parameter server %s' % self.task_index)
      self.cluster_manager.join_server()
      return {}

    # For distributed_all_reduce with multiple workers, drive
    # from a separate controller process.
    if self.params.variable_update == 'distributed_all_reduce':
      if self.params.job_name == 'worker':
        log_fn('Starting worker %s' % self.task_index)
        self.cluster_manager.join_server()
        return
      elif self.params.job_name and self.params.job_name != 'controller':
        raise ValueError('unrecognized job name: %s' % self.params.job_name)

    with tf.Graph().as_default():
      if self.params.eval:
        return self._eval_cnn()
      else:
        return self._benchmark_cnn()
Project: cxflow-tensorflow | Author: Cognexa
def graph(self) -> tf.Graph:
        """TF graph object."""
        return self._graph
Project: hdrnet_legacy | Author: mgharbi
def __init__(self, fnames, shuffle=True, num_epochs=None):
    """Init from a list of filenames to enqueue.

    Args:
      fnames: list of .tfrecords filenames to enqueue.
      shuffle: if true, shuffle the list at each epoch
    """
    self._fnames = fnames
    self._fname_queue = tf.train.string_input_producer(
        self._fnames,
        capacity=1000,
        shuffle=shuffle,
        num_epochs=num_epochs,
        shared_name='input_files')
    self._reader = tf.TFRecordReader()

    # Read first record to initialize the shape parameters
    with tf.Graph().as_default():
      fname_queue = tf.train.string_input_producer(self._fnames)
      reader = tf.TFRecordReader()
      _, serialized = reader.read(fname_queue)
      shapes = self._parse_shape(serialized)
      dtypes = self._parse_dtype(serialized)

      config = tf.ConfigProto()
      config.gpu_options.allow_growth = True

      with tf.Session(config=config) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        self.shapes = sess.run(shapes)
        self.shapes = {k: self.shapes[k+'_sz'].tolist() for k in self.FEATURES}

        self.dtypes = sess.run(dtypes)
        self.dtypes = {k: REVERSE_TYPEMAP[self.dtypes[k+'_dtype'][0]] for k in self.FEATURES}

        coord.request_stop()
        coord.join(threads)
Project: ddpg-aigym | Author: stevenpjg
def __init__(self, num_states, num_actions):
        self.g=tf.Graph()
        with self.g.as_default():
            self.sess = tf.InteractiveSession()


            #actor network model parameters:
            self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a,\
            self.actor_state_in, self.actor_model = self.create_actor_net(num_states, num_actions)


            #target actor network model parameters:
            self.t_W1_a, self.t_B1_a, self.t_W2_a, self.t_B2_a, self.t_W3_a, self.t_B3_a,\
            self.t_actor_state_in, self.t_actor_model = self.create_actor_net(num_states, num_actions)

            #cost of actor network:
            self.q_gradient_input = tf.placeholder("float",[None,num_actions]) #gets input from action_gradient computed in critic network file
            self.actor_parameters = [self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a]
            self.parameters_gradients = tf.gradients(self.actor_model,self.actor_parameters,-self.q_gradient_input)#/BATCH_SIZE) 
            self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(zip(self.parameters_gradients,self.actor_parameters))  
            #initialize all tensor variable parameters:
            self.sess.run(tf.initialize_all_variables())    

            #To make sure actor and target have same intial parmameters copy the parameters:
            # copy target parameters
            self.sess.run([
                self.t_W1_a.assign(self.W1_a),
                self.t_B1_a.assign(self.B1_a),
                self.t_W2_a.assign(self.W2_a),
                self.t_B2_a.assign(self.B2_a),
                self.t_W3_a.assign(self.W3_a),
                self.t_B3_a.assign(self.B3_a)])

            self.update_target_actor_op = [
                self.t_W1_a.assign(TAU*self.W1_a+(1-TAU)*self.t_W1_a),
                self.t_B1_a.assign(TAU*self.B1_a+(1-TAU)*self.t_B1_a),
                self.t_W2_a.assign(TAU*self.W2_a+(1-TAU)*self.t_W2_a),
                self.t_B2_a.assign(TAU*self.B2_a+(1-TAU)*self.t_B2_a),
                self.t_W3_a.assign(TAU*self.W3_a+(1-TAU)*self.t_W3_a),
                self.t_B3_a.assign(TAU*self.B3_a+(1-TAU)*self.t_B3_a)]
Project: DmsMsgRcg | Author: bshao001
def predict(model_scope, result_dir, result_file, img_features, k=1):
        """
        Args:
            model_scope: The variable_scope used when this model was trained.
            result_dir: The full path to the folder in which the result file locates.
            result_file: The file that saves the training results.
            img_features: A 2-D ndarray (matrix) each row of which holds the pixels as
                features of one image. One or more rows (image samples) can be requested
                to be predicted at once.
            k: Optional. Number of elements to be predicted.
        Returns:
            values and indices. Refer to tf.nn.top_k for details.
        """
        with tf.Session(graph=tf.Graph()) as sess:
            saver = tf.train.import_meta_graph(os.path.join(result_dir, result_file + ".meta"))
            saver.restore(sess, os.path.join(result_dir, result_file))

            # Retrieve the Ops we 'remembered'.
            logits = tf.get_collection(model_scope+"logits")[0]
            images_placeholder = tf.get_collection(model_scope+"images")[0]
            keep_prob_placeholder = tf.get_collection(model_scope+"keep_prob")[0]

            # Add an Op that chooses the top k predictions. Apply softmax so that
            # we can have the probabilities (percentage) in the output.
            eval_op = tf.nn.top_k(tf.nn.softmax(logits), k=k)

            values, indices = sess.run(eval_op, feed_dict={images_placeholder: img_features,
                                                           keep_prob_placeholder: 1.0})

            return values, indices
Project: tfplus | Author: renmengye
def main(_):
    img = load_image("data/cat.jpg")
    print(img)
    img_p = preprocess(img)

    for layers in [50, 101, 152]:
        g = tf.Graph()
        with g.as_default():
            print "CONVERT", layers
            convert(g, img, img_p, layers)