Python tensorflow module: get_default_graph() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.get_default_graph().
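
As a warm-up before the project examples, here is a minimal, self-contained sketch (TensorFlow 1.x; the tensor names x and y are chosen for illustration) of what tf.get_default_graph() returns and how tensors are looked up by name:

import tensorflow as tf

# Ops created outside an explicit graph context land in the default graph.
x = tf.placeholder(tf.float32, shape=(None,), name='x')
y = tf.square(x, name='y')

graph = tf.get_default_graph()

# Tensors are addressed as '<op_name>:<output_index>'.
x_ref = graph.get_tensor_by_name('x:0')
y_ref = graph.get_tensor_by_name('y:0')
assert x_ref is x and y_ref is y

with tf.Session(graph=graph) as sess:
    print(sess.run(y_ref, feed_dict={x_ref: [1.0, 2.0, 3.0]}))  # [1. 4. 9.]

This name-based lookup is the pattern nearly all of the examples below rely on, whether the graph was built in-process, imported from a GraphDef, or restored from a checkpoint.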

Project: tensorflow-yolo  Author: hjimce
def build_from_pb(self):
        with tf.gfile.FastGFile(self.FLAGS.pbLoad, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        tf.import_graph_def(
            graph_def,
            name=""
        )
        with open(self.FLAGS.metaLoad, 'r') as fp:
            self.meta = json.load(fp)
        self.framework = create_framework(self.meta, self.FLAGS)

        # Placeholders
        self.inp = tf.get_default_graph().get_tensor_by_name('input:0')
        self.feed = dict() # other placeholders
        self.out = tf.get_default_graph().get_tensor_by_name('output:0')

        self.setup_meta_ops()
Project: cxflow-tensorflow  Author: Cognexa
def test_create_optimizer(self):
        """Test if create optimizer does work with tf optimizers."""

        optimizer_config = {'learning_rate': 0.1}

        # test missing required entry `class`
        self.assertRaises(AssertionError, create_optimizer, optimizer_config)

        optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'

        with tf.Session().as_default():
            # test if the optimizer is created correctly
            optimizer = create_optimizer(optimizer_config)
            self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

            # test if learning_rate variable is created with the correct value
            lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
            tf.get_default_session().run(tf.global_variables_initializer())
            self.assertAlmostEqual(lr_tensor.eval(), 0.1)

        optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

        # test missing required argument (momentum in this case)
        with tf.Graph().as_default():
            self.assertRaises(TypeError, create_optimizer, optimizer_config2)
Project: tfutils  Author: neuroailab
def train_model(self, num_steps=100):
        x_train = [1, 2, 3, 4]
        y_train = [0, -1, -2, -3]
        x = tf.get_default_graph().get_tensor_by_name('model_0/x:0')
        y = tf.get_default_graph().get_tensor_by_name('model_0/y:0')
        feed_dict = {x: x_train, y: y_train}

        pre_global_step = self.sess.run(self.global_step)
        for step in range(num_steps):
            train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
            self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))

        post_global_step = self.sess.run(self.global_step)
        self.assertEqual(pre_global_step + num_steps, post_global_step)
        self.step += num_steps
        return train_res
Project: how_to_deploy_a_keras_model_to_production  Author: llSourcell
def init(): 
    json_file = open('model.json','r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    print("Loaded Model from disk")

    #compile and evaluate loaded model
    loaded_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
    #loss,accuracy = model.evaluate(X_test,y_test)
    #print('loss:', loss)
    #print('accuracy:', accuracy)
    graph = tf.get_default_graph()

    return loaded_model,graph
Project: MLPractices  Author: carefree0910
def load_frozen_graph(graph_dir, fix_nodes=True, entry=None, output=None):
        with gfile.FastGFile(graph_dir, "rb") as file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file.read())
            if fix_nodes:
                for node in graph_def.node:
                    if node.op == 'RefSwitch':
                        node.op = 'Switch'
                        for index in range(len(node.input)):
                            if 'moving_' in node.input[index]:
                                node.input[index] = node.input[index] + '/read'
                    elif node.op == 'AssignSub':
                        node.op = 'Sub'
                        if 'use_locking' in node.attr:
                            del node.attr['use_locking']
            tf.import_graph_def(graph_def, name="")
            if entry is not None:
                entry = tf.get_default_graph().get_tensor_by_name(entry)
            if output is not None:
                output = tf.get_default_graph().get_tensor_by_name(output)
            return entry, output
Project: tea  Author: antorsae
def init_segmenter(args_segmenter_model):
    global segmenter_model, rings, sectors, points_per_ring, is_ped, tf_segmenter_graph
    segmenter_model = load_model(args_segmenter_model, compile=False)
    segmenter_model._make_predict_function() # https://github.com/fchollet/keras/issues/6124
    print("Loading segmenter model " + args_segmenter_model)
    segmenter_model.summary()
    points_per_ring = segmenter_model.get_input_shape_at(0)[0][1]
    match = re.search(r'lidarnet-(car|ped)-.*seg-rings_(\d+)_(\d+)-sectors_(\d+)-.*\.hdf5', args_segmenter_model)
    is_ped = match.group(1) == 'ped'
    rings = range(int(match.group(2)), int(match.group(3)))
    sectors = int(match.group(4))
    points_per_ring *= sectors
    assert len(rings) == segmenter_model.get_input_shape_at(0)[0][2]
    print('Loaded segmenter model with ' + str(points_per_ring) + ' points per ring and ' + str(len(rings)) +
          ' rings from ' + str(rings[0]) + ' to ' + str(rings[-1]) )

    if K._backend == 'tensorflow':
        tf_segmenter_graph = tf.get_default_graph()
        print(tf_segmenter_graph)
    return
Project: combine-DT-with-NN-in-RL  Author: Burning-Bear
def test_set_value():
    a = tf.Variable(42.)
    with single_threaded_session():
        set_value(a, 5)
        assert a.eval() == 5
        g = tf.get_default_graph()
        g.finalize()
        set_value(a, 6)
        assert a.eval() == 6

        # test the test
        try:
            assert a.eval() == 7
        except AssertionError:
            pass
        else:
            assert False, "assertion should have failed"
Project: rl-attack-detection  Author: yenchenlin
def test_set_value():
    a = tf.Variable(42.)
    with single_threaded_session():
        set_value(a, 5)
        assert a.eval() == 5
        g = tf.get_default_graph()
        g.finalize()
        set_value(a, 6)
        assert a.eval() == 6

        # test the test
        try:
            assert a.eval() == 7
        except AssertionError:
            pass
        else:
            assert False, "assertion should have failed"
Project: HyperGAN  Author: 255BITS
def _sample(self):
        gan = self.gan
        z_t = gan.encoder.sample
        inputs_t = gan.inputs.x

        if self.z is None:
            self.z = gan.encoder.sample.eval()
            self.target = gan.encoder.sample.eval()
            self.input = gan.session.run(gan.inputs.x)

        if self.step > self.steps:
            self.z = self.target
            self.target = gan.encoder.sample.eval()
            self.step = 0

        percent = float(self.step)/self.steps
        z_interp = self.z*(1.0-percent) + self.target*percent
        self.step+=1

        g=tf.get_default_graph()
        with g.as_default():
            tf.set_random_seed(1)
            return {
                'generator': gan.session.run(gan.generator.sample, feed_dict={z_t: z_interp, inputs_t: self.input})
            }
Project: baselines  Author: openai
def test_set_value():
    a = tf.Variable(42.)
    with single_threaded_session():
        set_value(a, 5)
        assert a.eval() == 5
        g = tf.get_default_graph()
        g.finalize()
        set_value(a, 6)
        assert a.eval() == 6

        # test the test
        try:
            assert a.eval() == 7
        except AssertionError:
            pass
        else:
            assert False, "assertion should have failed"
Project: facial-emotion-detection-dl  Author: dllatas
def test():
    with tf.Graph().as_default():
        image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
        logits = model.inference(image)
        top_k_op = tf.nn.in_top_k(logits, label, 1)

        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Get summaries for TensorBoard
        summary_op = tf.merge_all_summaries()
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(input.FLAGS.eval_dir, graph_def=graph_def)

        while True:
            evaluate_model(saver, summary_writer, top_k_op, summary_op)
            if input.FLAGS.run_once:
                break
            time.sleep(input.FLAGS.eval_interval_secs)
Project: sonnet  Author: deepmind
def testScopeRestore(self):
    c1 = conv.Conv2D(
        16,
        8,
        4,
        name='conv_2d_0',
        padding=conv.VALID,
        initializers={
            'w':
                initializers.restore_initializer(
                    _checkpoint(), 'w', scope='agent/conv_net_2d/conv_2d_0'),
            'b':
                initializers.restore_initializer(
                    _checkpoint(), 'b', scope='agent/conv_net_2d/conv_2d_0')
        })

    inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3])
    outputs = c1(inputs)
    init = tf.global_variables_initializer()
    tf.get_default_graph().finalize()
    with self.test_session() as session:
      session.run(init)
      o = session.run(outputs)

    self.assertAllClose(np.linalg.norm(o), _ONE_CONV_LAYER, atol=_TOLERANCE)
Project: sonnet  Author: deepmind
def testModuleInfo_multiple_subgraph(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb(ph_0)
    with tf.name_scope("foo"):
      dumb(ph_0)
    def check():
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      self.assertEqual(len(sonnet_collection), 1)
      self.assertEqual(len(sonnet_collection[0].connected_subgraphs), 2)
      connected_subgraph_0 = sonnet_collection[0].connected_subgraphs[0]
      connected_subgraph_1 = sonnet_collection[0].connected_subgraphs[1]
      self.assertEqual(connected_subgraph_0.name_scope, "dumb_a")
      self.assertEqual(connected_subgraph_1.name_scope, "foo/dumb_a")
    check()
    _copy_default_graph()
    check()
Project: sonnet  Author: deepmind
def testModuleInfo_sparsetensor(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    sparse_tensor = tf.SparseTensor(
        indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
        values=tf.placeholder(dtype=tf.float32, shape=(10,)),
        dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
    dumb(sparse_tensor)
    def check():
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(
          connected_subgraph.inputs["inputs"], tf.SparseTensor)
      self.assertIsInstance(connected_subgraph.outputs, tf.SparseTensor)
    check()
    _copy_default_graph()
    check()
Project: sonnet  Author: deepmind
def testModuleInfo_tuple(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb((ph_0, ph_1))
    def check():
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(connected_subgraph.inputs["inputs"], tuple)
      self.assertIsInstance(connected_subgraph.outputs, tuple)
    check()
    _copy_default_graph()
    check()
Project: sonnet  Author: deepmind
def testModuleInfo_dict(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb({"ph_0": ph_0, "ph_1": ph_1})
    def check():
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(connected_subgraph.inputs["inputs"], dict)
      self.assertIsInstance(connected_subgraph.outputs, dict)
    check()
    _copy_default_graph()
    check()
Project: sonnet  Author: deepmind
def testModuleInfo_recursion(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a", no_nest=True)
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    val = {"one": ph_0, "self": None}
    val["self"] = val
    dumb(val)
    def check(check_type):
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(connected_subgraph.inputs["inputs"]["one"],
                            tf.Tensor)
      self.assertIsInstance(
          connected_subgraph.inputs["inputs"]["self"], check_type)
      self.assertIsInstance(connected_subgraph.outputs["one"], tf.Tensor)
      self.assertIsInstance(connected_subgraph.outputs["self"], check_type)
    check(dict)
    _copy_default_graph()
    check(base_info._UnserializableObject)
Project: sonnet  Author: deepmind
def _check_same_graph(self):
    """Checks that the module is not being connect to multiple Graphs.

    An instance of a Sonnet module 'owns' the variables it contains, and permits
    seamless variable sharing. As such, connecting a single module instance to
    multiple Graphs is not possible - this function will raise an error should
    that occur.

    Raises:
      DifferentGraphError: if the module is connected to a different Graph than
        it was previously used in.
    """
    current_graph = tf.get_default_graph()
    if self._graph is None:
      self._graph = current_graph
      self._set_module_info()
    elif self._graph != current_graph:
      raise DifferentGraphError("Cannot connect module to multiple Graphs.")
Project: transform  Author: tensorflow
def testInferFeatureSchema(self):
    d = tf.placeholder(tf.int64, None)
    tensors = {
        'a': tf.placeholder(tf.float32, (None,)),
        'b': tf.placeholder(tf.string, (1, 2, 3)),
        'c': tf.placeholder(tf.int64, None),
        'd': d
    }
    d_column_schema = sch.ColumnSchema(tf.int64, [1, 2, 3],
                                       sch.FixedColumnRepresentation())
    api.set_column_schema(d, d_column_schema)
    schema = impl_helper.infer_feature_schema(tf.get_default_graph(), tensors)
    expected_schema = sch.Schema(column_schemas={
        'a': sch.ColumnSchema(tf.float32, [],
                              sch.FixedColumnRepresentation()),
        'b': sch.ColumnSchema(tf.string, [2, 3],
                              sch.FixedColumnRepresentation()),
        'c': sch.ColumnSchema(tf.int64, None,
                              sch.FixedColumnRepresentation()),
        'd': sch.ColumnSchema(tf.int64, [1, 2, 3],
                              sch.FixedColumnRepresentation())
    })
    self.assertEqual(schema, expected_schema)
Project: tefla  Author: litan
def _print_info(self, data_set, verbose):
        logger.info('Config:')
        logger.info(pprint.pformat(self.cnf))
        data_set.print_info()
        logger.info('Max epochs: %d' % self.num_epochs)
        if verbose > 0:
            util.show_vars(logger, self.trainable_scopes)

        # logger.debug("\n---Number of Regularizable vars in model:")
        # logger.debug(len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))

        if verbose > 3:
            all_ops = tf.get_default_graph().get_operations()
            logger.debug("\n---All ops in graph")
            names = map(lambda v: v.name, all_ops)
            for n in sorted(names):
                logger.debug(n)

        util.show_layer_shapes(self.training_end_points, logger)
Project: hourglasstensorlfow  Author: wbenbihi
def _define_saver_summary(self, summary = True):
        """ Create Summary and Saver
        Args:
            logdir_train        : Path to train summary directory
            logdir_test     : Path to test summary directory
        """
        if (self.logdir_train == None) or (self.logdir_test == None):
            raise ValueError('Train/Test directory not assigned')
        else:
            with tf.device(self.cpu):
                self.saver = tf.train.Saver()
            if summary:
                with tf.device(self.gpu):
                    self.train_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())
                    self.test_summary = tf.summary.FileWriter(self.logdir_test)
                    #self.weight_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())
Project: nfm  Author: faychu
def _initialize_weights(self):
        all_weights = dict()
        if self.pretrain_flag > 0:
            weight_saver = tf.train.import_meta_graph(self.save_file + '.meta')
            pretrain_graph = tf.get_default_graph()
            feature_embeddings = pretrain_graph.get_tensor_by_name('feature_embeddings:0')
            feature_bias = pretrain_graph.get_tensor_by_name('feature_bias:0')
            bias = pretrain_graph.get_tensor_by_name('bias:0')
            with tf.Session() as sess:
                weight_saver.restore(sess, self.save_file)
                fe, fb, b = sess.run([feature_embeddings, feature_bias, bias])
            all_weights['feature_embeddings'] = tf.Variable(fe, dtype=tf.float32)
            all_weights['feature_bias'] = tf.Variable(fb, dtype=tf.float32)
            all_weights['bias'] = tf.Variable(b, dtype=tf.float32)
        else:
            all_weights['feature_embeddings'] = tf.Variable(
                tf.random_normal([self.features_M, self.hidden_factor], 0.0, 0.01),
                name='feature_embeddings')  # features_M * K
            all_weights['feature_bias'] = tf.Variable(
                tf.random_uniform([self.features_M, 1], 0.0, 0.0), name='feature_bias')  # features_M * 1
            all_weights['bias'] = tf.Variable(tf.constant(0.0), name='bias')  # 1 * 1
        return all_weights
Project: EEGSignalAnalysis  Author: pprakhar30
def Get_Pre_Trained_Weights(input_vars,name):
    with open("vgg16.tfmodel", mode='rb') as f:
        fileContent = f.read()

    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)
    images = tf.placeholder(tf.float32,shape = (None, 64, 64, 3),name=name)
    tf.import_graph_def(graph_def, input_map={ "images": images })
    print "graph loaded from disk"

    graph = tf.get_default_graph()
    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        #batch = np.reshape(input_vars,(-1, 224, 224, 3))
        n_timewin = 7   
        convnets = []
        for i in xrange(n_timewin):
            feed_dict = { images:input_vars[:,i,:,:,:] }
            pool_tensor = graph.get_tensor_by_name("import/pool5:0")
            pool_tensor = sess.run(pool_tensor, feed_dict=feed_dict)
            convnets.append(tf.contrib.layers.flatten(pool_tensor))
        convpool = tf.pack(convnets, axis = 1)
        return convpool
Project: tf_serving_example  Author: Vetal1977
def load_and_predict_with_saved_model():
    '''
    Loads a model saved as protobuf and makes a prediction on a single image
    '''
    with tf.Session(graph=tf.Graph()) as sess:
        # restore saved model
        export_dir = './gan-export/1'
        model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        # print(model)
        loaded_graph = tf.get_default_graph()

        # get necessary tensors by name
        input_tensor_name = model.signature_def['predict_images'].inputs['images'].name
        input_tensor = loaded_graph.get_tensor_by_name(input_tensor_name)
        output_tensor_name = model.signature_def['predict_images'].outputs['scores'].name
        output_tensor = loaded_graph.get_tensor_by_name(output_tensor_name)

        # make prediction
        image_file_name = './svnh_test_images/image_3.jpg'
        with open(image_file_name, 'rb') as f:
            image = f.read()
            scores = sess.run(output_tensor, {input_tensor: [image]})

        # print results
        print("Scores: {}".format(scores))
Project: dataset  Author: analysiscenter
def train_dynamic(self):
        print("inside train")
        model_spec = self.get_model_by_name(MyBatch.dynamic_model)
        #print("        action for a dynamic model", model_spec)
        session = self.pipeline.get_variable("session")
        with self.pipeline.get_variable("print lock"):
            print("\n\n ================= train dynamic ====================")
            print("----- default graph")
            #print(tf.get_default_graph().get_operations())
            print("----- session graph")
            print(session.graph.get_operations())
        input_data, model_output = model_spec
        res = session.run(model_output, feed_dict={input_data: self.data})
        self.pipeline.get_variable("loss history").append(res)
        #print("        ", int(res))
        return self
Project: deepmodels  Author: learningsociety
def eval_tensor(sess, input_tensor_name, input_val, output_tensor_name):
  """Get output value of a specific tensor.

  Assuming the default graph is used.

  Args:
    sess: tf session object.
    input_tensor_name: name of the input tensor.
    input_val: input value to the network.
    output_tensor_name: name of the output tensor.
  Returns:
    result of output tensor.
  """
  cur_graph = tf.get_default_graph()
  input_tensor = cur_graph.get_tensor_by_name(input_tensor_name)
  output_tensor = cur_graph.get_tensor_by_name(output_tensor_name)
  out_val = sess.run(output_tensor, feed_dict={input_tensor: input_val})
  return out_val
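
For context, a hypothetical call to the helper above; it assumes the default graph defines tensors named 'input:0' and 'output:0', and the batch shape is illustrative:

import numpy as np

# Hypothetical usage: 'input:0' and 'output:0' stand in for whatever
# the loaded graph actually names its endpoints.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros((1, 224, 224, 3), dtype=np.float32)  # illustrative shape
    out_val = eval_tensor(sess, 'input:0', batch, 'output:0')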
Project: texture-networks  Author: ProofByConstruction
def conv_block(name, input_layer, kernel_size, out_channels):
    """
    Per Ulyanov et al., this is a block consisting of
        - Mirror pad (TODO)
        - Number of maps from a convolutional layer equal to out_channels (multiples of 8)
        - Spatial BatchNorm
        - LeakyReLu
    """
    with tf.get_default_graph().name_scope(name):
        in_channels = input_layer.get_shape().as_list()[-1]

        # Xavier initialization, http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
        # The application of this method here seems unorthodox since we're using ReLU, not sigmoid or tanh.
        low = -np.sqrt(6.0/(in_channels + out_channels))
        high = np.sqrt(6.0/(in_channels + out_channels))
        weights = tf.Variable(tf.random_uniform([kernel_size, kernel_size, in_channels, out_channels], minval=low, maxval=high), name='weights')
        biases = tf.Variable(tf.random_uniform([out_channels], minval=low, maxval=high), name='biases')
        # TODO: Mirror pad the conv2d? I'm not sure how important this is.
        conv = conv2d(input_layer, weights, biases)
        batch_norm = spatial_batch_norm(conv)
        relu = leaky_relu(batch_norm, .01)
        return relu
Project: FPN  Author: xmyqsh
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
        """Initialize the SolverWrapper."""
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.output_dir = output_dir
        self.pretrained_model = pretrained_model

        print 'Computing bounding-box regression targets...'
        if cfg.TRAIN.BBOX_REG:
            self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

        # For checkpoint
        self.saver = tf.train.Saver(max_to_keep=100)
        self.writer = tf.summary.FileWriter(logdir=logdir,
                                             graph=tf.get_default_graph(),
                                             flush_secs=5)
Project: TFFRCNN  Author: InterVideo
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
        """Initialize the SolverWrapper."""
        self.net = network
        self.imdb = imdb
        self.roidb = roidb
        self.output_dir = output_dir
        self.pretrained_model = pretrained_model

        print 'Computing bounding-box regression targets...'
        if cfg.TRAIN.BBOX_REG:
            self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

        # For checkpoint
        self.saver = tf.train.Saver(max_to_keep=100)
        self.writer = tf.train.SummaryWriter(logdir=logdir,
                                             graph=tf.get_default_graph(),
                                             flush_secs=5)
Project: nlvr_tau_nlp_final_proj  Author: udiNaveh
def get_feed_dicts_from_sentence(sentence, sentence_placeholder, sent_lengths_placeholder, sentence_words_bow,
                                 encoder_output_tensors, learn_embeddings=False):
    """
    Creates the values and feed dicts that depend on the sentence.
    These feed dicts are used to run the graph or to compute gradients.
    """

    sentence_matrix = np.stack([one_hot_dict.get(w, one_hot_dict['<UNK>']) for w in sentence.split()])
    bow_words = np.reshape(np.sum([words_array == x for x in sentence.split()], axis=0), [1, len(words_vocabulary)])

    length = [len(sentence.split())]
    encoder_feed_dict = {sentence_placeholder: sentence_matrix, sent_lengths_placeholder: length,
                         sentence_words_bow: bow_words}
    sentence_encoder_outputs = sess.run(encoder_output_tensors, feed_dict=encoder_feed_dict)
    decoder_feed_dict = {encoder_output_tensors[i]: sentence_encoder_outputs[i]
                         for i in range(len(encoder_output_tensors))}

    if not learn_embeddings:
        W_we = tf.get_default_graph().get_tensor_by_name('W_we:0')
        encoder_feed_dict = union_dicts(encoder_feed_dict, {W_we: embeddings_matrix})
    return encoder_feed_dict, decoder_feed_dict
Project: tefla  Author: openAGI
def underlying_variable(t):
    """Find the underlying tf.Variable object.

    Args:
      t: a Tensor

    Returns:
      a tf.Variable object.
    """
    t = variable_ref(t)
    assert t is not None
    # make sure that the graph has a variable index and that it is up-to-date
    if not hasattr(tf.get_default_graph(), "var_index"):
        tf.get_default_graph().var_index = {}
    var_index = tf.get_default_graph().var_index
    for v in tf.global_variables()[len(var_index):]:
        var_index[v.name] = v
    return var_index[t.name]
Project: tefla  Author: openAGI
def _lower_bound(inputs, bound, name=None):
    """Same as tf.maximum, but with helpful gradient for inputs < bound.
    The gradient is overwritten so that it is passed through if the input is not
    hitting the bound. If it is, only gradients that push `inputs` higher than
    the bound are passed through. No gradients are passed through to the bound.
    Args:
      inputs: input tensor
      bound: lower bound for the input tensor
      name: name for this op
    Returns:
      tf.maximum(inputs, bound)
    """
    with tf.name_scope(name, 'GDNLowerBoundTefla', [inputs, bound]) as scope:
        inputs = tf.convert_to_tensor(inputs, name='inputs')
        bound = tf.convert_to_tensor(bound, name='bound')
        with tf.get_default_graph().gradient_override_map(
                {'Maximum': 'GDNLowerBoundTefla'}):
            return tf.maximum(inputs, bound, name=scope)
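
The gradient_override_map above only takes effect if a gradient function has been registered under the name 'GDNLowerBoundTefla'. A sketch of what that registration could look like, derived from the behavior the docstring describes (tefla's actual implementation may differ):

from tensorflow.python.framework import ops

@ops.RegisterGradient("GDNLowerBoundTefla")
def _lower_bound_grad(op, grad):
    # Pass the gradient through where the input sits above the bound, or where
    # the gradient would push the input upward; the bound itself gets no gradient.
    inputs, bound = op.inputs[0], op.inputs[1]
    pass_through = tf.logical_or(inputs >= bound, grad < 0)
    return [tf.cast(pass_through, grad.dtype) * grad, None]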
Project: iCaRL  Author: srebuffi
def reading_data_and_preparing_network(files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing, nb_groups, nb_cl, save_path):
    image_train, label_train,file_string       = utils_data.read_data_test(train_path,labels_dic, mixing,files_from_cl=files_from_cl)
    image_batch, label_batch,file_string_batch = tf.train.batch([image_train, label_train,file_string], batch_size=batch_size, num_threads=8)
    label_batch_one_hot = tf.one_hot(label_batch,nb_groups*nb_cl)

    ### Network and loss function  
    mean_img = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
    with tf.variable_scope('ResNet18'):
        with tf.device('/gpu:'+gpu):
            scores         = utils_resnet.ResNet18(image_batch-mean_img, phase='test',num_outputs=nb_cl*nb_groups)
            graph          = tf.get_default_graph()
            op_feature_map = graph.get_operation_by_name('ResNet18/pool_last/avg').outputs[0]

    loss_class = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch_one_hot, logits=scores))

    ### Initialization
    params = dict(cPickle.load(open(save_path+'model-iteration'+str(nb_cl)+'-%i.pickle' % itera)))
    inits  = utils_resnet.get_weight_initializer(params)

    return inits,scores,label_batch,loss_class,file_string_batch,op_feature_map
Project: AVSR-Deep-Speech  Author: pandeydivesh15
def restore_model(self):
        # Load meta graph and learned weights
        saver = tf.train.import_meta_graph(self.export_dir + self.name + '.meta')
        saver.restore(self.session, tf.train.latest_checkpoint(self.export_dir))

        # Get input and output nodes
        graph = tf.get_default_graph()
        self.input = graph.get_tensor_by_name("input_node:0")
        self.input_len = graph.get_tensor_by_name("input_lengths:0")
        self.output = graph.get_tensor_by_name("output_node:0")
Project: skiprnn-2017-telecombcn  Author: imatge-upc
def _binary_round(x):
    """
    Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},
    using the straight through estimator for the gradient.

    Based on http://r2rt.com/binary-stochastic-neurons-in-tensorflow.html

    :param x: input tensor
    :return: y=round(x) with gradients defined by the identity mapping (y=x)
    """
    g = tf.get_default_graph()

    with ops.name_scope("BinaryRound") as name:
        with g.gradient_override_map({"Round": "Identity"}):
            return tf.round(x, name=name)
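
As a quick sanity check of the straight-through estimator above (a minimal sketch): the forward pass rounds, while the backward pass behaves like the identity.

x = tf.constant([0.2, 0.8])
y = _binary_round(x)           # forward pass: [0., 1.]
dy_dx = tf.gradients(y, x)[0]  # backward pass: identity, so [1., 1.]
with tf.Session() as sess:
    print(sess.run([y, dy_dx]))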
Project: seq2seq  Author: google
def after_run(self, _run_context, run_values):
    if not self.is_chief or self._done:
      return

    step_done = run_values.results
    if self._active:
      tf.logging.info("Captured full trace at step %s", step_done)
      # Create output directory
      gfile.MakeDirs(self._output_dir)

      # Save run metadata
      trace_path = os.path.join(self._output_dir, "run_meta")
      with gfile.GFile(trace_path, "wb") as trace_file:
        trace_file.write(run_values.run_metadata.SerializeToString())
        tf.logging.info("Saved run_metadata to %s", trace_path)

      # Save timeline
      timeline_path = os.path.join(self._output_dir, "timeline.json")
      with gfile.GFile(timeline_path, "w") as timeline_file:
        tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
        tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
        timeline_file.write(tl_chrome)
        tf.logging.info("Saved timeline to %s", timeline_path)

      # Save tfprof op log
      tf.contrib.tfprof.tfprof_logger.write_op_log(
          graph=tf.get_default_graph(),
          log_dir=self._output_dir,
          run_meta=run_values.run_metadata)
      tf.logging.info("Saved op log to %s", self._output_dir)
      self._active = False
      self._done = True

    self._active = (step_done >= self.params["step"])
Project: seq2seq  Author: google
def begin(self):
    # Dump to file on the chief worker
    if self.is_chief:
      opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
      opts['dump_to_file'] = os.path.abspath(self._filename)
      tf.contrib.tfprof.model_analyzer.print_model_analysis(
          tf.get_default_graph(), tfprof_options=opts)

    # Print the model analysis
    with gfile.GFile(self._filename) as file:
      tf.logging.info(file.read())
Project: ml  Author: hohoins
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Project: benchmarks  Author: tensorflow
def _eval_cnn(self):
    """Evaluate a model every self.params.eval_interval_secs.

    Returns:
      Dictionary containing eval statistics. Currently returns an empty
      dictionary.
    """
    (image_producer_ops, enqueue_ops, fetches) = self._build_model()
    saver = tf.train.Saver(self.variable_mgr.savable_variables())
    summary_writer = tf.summary.FileWriter(self.params.eval_dir,
                                           tf.get_default_graph())
    target = ''
    local_var_init_op = tf.local_variables_initializer()
    variable_mgr_init_ops = [local_var_init_op]
    with tf.control_dependencies([local_var_init_op]):
      variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())
    local_var_init_op_group = tf.group(*variable_mgr_init_ops)
    summary_op = tf.summary.merge_all()
    # TODO(huangyp): Check if checkpoints haven't updated for hours and abort.
    while True:
      self._eval_once(saver, summary_writer, target, local_var_init_op_group,
                      image_producer_ops, enqueue_ops, fetches, summary_op)
      if self.params.eval_interval_secs <= 0:
        break
      time.sleep(self.params.eval_interval_secs)
    return {}
Project: cxflow-tensorflow  Author: Cognexa
def _find_or_raise(self, tensor_name: str) -> tf.Tensor:
        """
        Find the tensor with the given name in the default graph or raise an exception.
        :param tensor_name: name of the tensor to find
        :return: tf.Tensor
        """
        full_name = self._get_full_name(tensor_name)
        try:
            return tf.get_default_graph().get_tensor_by_name(full_name)
        except (KeyError, ValueError, TypeError) as ex:
            raise ValueError('Tensor `{}` with full name `{}` was not found.'.format(tensor_name, full_name)) from ex
Project: sea-lion-counter  Author: rdinse
def get_node(name):
  return tf.get_default_graph().as_graph_element(name.split(":")[0])
Project: kaggle-review  Author: daxiongshu
def _batch_normalization(self, x, layer_name, eps=0.001):
        with tf.variable_scope(layer_name.split('/')[-1]):
            beta, gamma, mean, variance = self._get_batch_normalization_weights(layer_name)
            # beta, gamma, mean, variance are numpy arrays!!!

            if beta is None:
                try:
                    net = tf.layers.batch_normalization(x, epsilon = eps)
                except:
                    net = tf.nn.batch_normalization(x, 0, 1, 0, 1, 0.01)
            else:
                try:
                    net = tf.layers.batch_normalization(x, epsilon = eps,        
                        beta_initializer = tf.constant_initializer(value=beta,dtype=tf.float32),
                        gamma_initializer = tf.constant_initializer(value=gamma,dtype=tf.float32),
                        moving_mean_initializer = tf.constant_initializer(value=mean,dtype=tf.float32),
                        moving_variance_initializer = tf.constant_initializer(value=variance,dtype=tf.float32), 
                    )
                except:
                    net = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.01)
        mean = '%s/batch_normalization/moving_mean:0'%(layer_name)
        variance = '%s/batch_normalization/moving_variance:0'%(layer_name)
        try:
            tf.add_to_collection(tf.GraphKeys.SAVE_TENSORS, tf.get_default_graph().get_tensor_by_name(mean))
            tf.add_to_collection(tf.GraphKeys.SAVE_TENSORS, tf.get_default_graph().get_tensor_by_name(variance))
        except:
            pass
        return net
Project: tfutils  Author: neuroailab
def get_extraction_target(inputs, outputs, to_extract, **loss_params):
    """Produce validation target function.

    Example validation target function used to provide targets for extracting features.
    This function also adds a standard "loss" target, which you may or may not want.

    The to_extract argument must be a dictionary of the form
          {name_for_saving: name_of_actual_tensor, ...}
    where "name_for_saving" is a human-friendly name to save extracted features
    under, and name_of_actual_tensor is the name of the tensor in the tensorflow
    graph that outputs the features to be extracted.  To figure out which tensor
    names are available for the "to_extract" argument, uncomment the
    commented-out lines, which will print a list of all available tensor names.

    """
    names = [[x.name for x in op.values()] for op in tf.get_default_graph().get_operations()]
    names = [y for x in names for y in x]

    r = re.compile(r'__GPU__\d/')
    _targets = defaultdict(list)

    for name in names:
        name_without_gpu_prefix = r.sub('', name)
        for save_name, actual_name in to_extract.items():
            if actual_name in name_without_gpu_prefix:
                tensor = tf.get_default_graph().get_tensor_by_name(name)
                _targets[save_name].append(tensor)

    targets = {k: tf.concat(v, axis=0) for k, v in _targets.items()}
    targets['loss'] = utils.get_loss(inputs, outputs, **loss_params)
    return targets
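
An illustrative to_extract mapping for the function above (the tensor names are hypothetical):

to_extract = {'conv1_features': 'conv1/relu:0',
              'fc_features': 'fc7/output:0'}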
Project: tfutils  Author: neuroailab
def _reuse_scope_name(self, name):
        graph = tf.get_default_graph()
        if graph._name_stack is not None and graph._name_stack != '':
            name = graph._name_stack + '/' + name + '/'  # this will reuse the already-created scope
        else:
            name += '/'
        return name
Project: DmsMsgRcg  Author: bshao001
def freeze(model_scope, model_dir, model_file):
    """
    Args:
        model_scope: The prefix of all variables in the model.
        model_dir: The full path to the folder in which the result file locates.
        model_file: The file that saves the training results, without file suffix / extension.
    """
    saver = tf.train.import_meta_graph(os.path.join(model_dir, model_file + ".meta"))
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    with tf.Session() as sess:
        saver.restore(sess, os.path.join(model_dir, model_file))

        print("# All operations:")
        for op in graph.get_operations():
            print(op.name)

        output_node_names = [v.name.split(":")[0] for v in tf.trainable_variables()]
        output_node_names.append("{}/readout/logits".format(model_scope))
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            input_graph_def,
            output_node_names
        )

        output_file = os.path.join(model_dir, model_file + ".pb")
        with tf.gfile.GFile(output_file, "wb") as f:
            f.write(output_graph_def.SerializeToString())

        print("Freezed model was saved as {}.pb.".format(model_file))
Project: tfplus  Author: renmengye
def save_graph(save_path):
    graph = tf.get_default_graph()
    graph_def = graph.as_graph_def()
    print "graph_def byte size", graph_def.ByteSize()
    graph_def_s = graph_def.SerializeToString()

    with open(save_path, "wb") as f:
        f.write(graph_def_s)

    print "saved model to %s" % save_path
Project: tfplus  Author: renmengye
def load_old_model(sess, nlayers, device='/cpu:0'):
    with tf.device(device):
        new_saver = tf.train.import_meta_graph(meta_fn(nlayers))
    new_saver.restore(sess, checkpoint_fn(nlayers))
    graph = tf.get_default_graph()
    prob_tensor = graph.get_tensor_by_name("prob:0")
    images = graph.get_tensor_by_name("images:0")
    return graph, images, prob_tensor