Python tensorflow.python.platform.gfile module: FastGFile() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.platform.gfile.FastGFile().
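
The common pattern across these examples is to open a file (a frozen GraphDef .pb, an image, or a text corpus) with gfile.FastGFile and read its contents. A minimal sketch of that pattern follows; the path 'frozen_graph.pb' is a placeholder, and FastGFile is a legacy alias that newer TensorFlow releases replace with gfile.GFile / tf.io.gfile.GFile (same interface).

import tensorflow as tf
from tensorflow.python.platform import gfile

# Read a serialized GraphDef in binary mode and import it into the default graph.
with gfile.FastGFile('frozen_graph.pb', 'rb') as f:  # placeholder path
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')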

Project: image_recognition | Author: tue-robotics
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: facerecognition | Author: guoxiaolu
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
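
A hypothetical call site for this loader (the path is a placeholder and the tensor name is illustrative, not taken from the project); it has to run inside a default session because the checkpoint branch restores into tf.get_default_session():

with tf.Graph().as_default():
    with tf.Session() as sess:
        load_model('/path/to/model')  # placeholder: frozen .pb file or checkpoint directory
        # Tensors can then be looked up by name in the imported graph.
        embeddings = tf.get_default_graph().get_tensor_by_name('embeddings:0')  # illustrative name
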
Project: MLPractices | Author: carefree0910
def load_frozen_graph(graph_dir, fix_nodes=True, entry=None, output=None):
        with gfile.FastGFile(graph_dir, "rb") as file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file.read())
            if fix_nodes:
                for node in graph_def.node:
                    if node.op == 'RefSwitch':
                        node.op = 'Switch'
                        for index in range(len(node.input)):
                            if 'moving_' in node.input[index]:
                                node.input[index] = node.input[index] + '/read'
                    elif node.op == 'AssignSub':
                        node.op = 'Sub'
                        if 'use_locking' in node.attr:
                            del node.attr['use_locking']
            tf.import_graph_def(graph_def, name="")
            if entry is not None:
                entry = tf.get_default_graph().get_tensor_by_name(entry)
            if output is not None:
                output = tf.get_default_graph().get_tensor_by_name(output)
            return entry, output
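
The RefSwitch/AssignSub rewriting above works around batch-norm update ops left in graphs that were frozen without fully stripping training nodes. A hedged usage sketch (graph path and tensor names are placeholders):

entry, output = load_frozen_graph('frozen_graph.pb',      # placeholder path
                                  entry='input:0',        # illustrative tensor names
                                  output='predictions:0')
with tf.Session() as sess:
    predictions = sess.run(output, feed_dict={entry: batch})  # `batch` supplied by the caller
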
Project: tensorflow-prebuilt-classifier | Author: recursionbane
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-image-classifier | Author: burliEnterprises
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-image-classifier | Author: burliEnterprises
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except:
    raise RuntimeError('Error during processing file %s' % image_path)

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: oversight | Author: hebenon
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: powerai-transfer-learning | Author: IBM
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: powerai-transfer-learning | Author: IBM
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except:
    raise RuntimeError('Error during processing file %s' % image_path)

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: faceNet_RealTime | Author: jack55436001
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
Project: tf_face | Author: ZhijianChan
def load_model(sess, model_path):
    if os.path.isfile(model_path):
        # A protobuf file with a frozen graph
        print('Model filename: %s' % model_path)
        with gfile.FastGFile(model_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        # A directory containing a metagraph file and a checkpoint file
        print('Model directory: %s' % model_path)
        meta_file, ckpt_file = get_model_filenames(model_path)
        print('Metagraph  file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
        saver = tf.train.import_meta_graph(os.path.join(model_path, meta_file), clear_devices=True)
        saver.restore(sess, os.path.join(model_path, ckpt_file))
Project: image-classification-tensorflow | Author: xuetsing
def create_inception_graph():
    """
    Brief:
        Creates a graph from saved GraphDef file and returns a Graph object.
    Returns:
        Graph holding the trained Inception network, and various tensors we'll be
        manipulating.
    """
    with tf.Graph().as_default() as graph:
        model_filename = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb')
        with gfile.FastGFile(model_filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
                tf.import_graph_def(graph_def, name='', return_elements=[
                    BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
                    RESIZED_INPUT_TENSOR_NAME]))
    return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: image-classification-tensorflow | Author: xuetsing
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
    """Create a single bottleneck file."""
    print('Creating bottleneck at ' + bottleneck_path)
    image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
    if not gfile.Exists(image_path):
        tf.logging.fatal('File does not exist %s', image_path)
    image_data = gfile.FastGFile(image_path, 'rb').read()
    try:
        bottleneck_values = run_bottleneck_on_image(
            sess, image_data, jpeg_data_tensor, bottleneck_tensor)
    except:
        raise RuntimeError('Error during processing file %s' % image_path)

    bottleneck_string = ','.join(str(x) for x in bottleneck_values)
    with open(bottleneck_path, 'w') as bottleneck_file:
        bottleneck_file.write(bottleneck_string)
Project: tensorflow-yys | Author: ystyle
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-yys | Author: ystyle
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except:
    raise RuntimeError('Error during processing file %s' % image_path)

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: RC-experiments | Author: cairoHy
def gen_embeddings(word_dict, embed_dim, in_file=None, init=np.zeros):
        """
        Init embedding matrix with (or without) pre-trained word embeddings.
        """
        num_words = max(word_dict.values()) + 1
        embedding_matrix = init(-0.05, 0.05, (num_words, embed_dim))
        logger('Embeddings: %d x %d' % (num_words, embed_dim))

        if not in_file:
            return embedding_matrix

        def get_dim(file):
            first = gfile.FastGFile(file, mode='r').readline()
            return len(first.split()) - 1

        assert get_dim(in_file) == embed_dim
        logger('Loading embedding file: %s' % in_file)
        pre_trained = 0
        for line in codecs.open(in_file, encoding="utf-8"):
            sp = line.split()
            if sp[0] in word_dict:
                pre_trained += 1
                embedding_matrix[word_dict[sp[0]]] = np.asarray([float(x) for x in sp[1:]], dtype=np.float32)
        logger("Pre-trained: {}, {:.3f}%".format(pre_trained, pre_trained * 100.0 / num_words))
        return embedding_matrix
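
Note that the default init=np.zeros does not accept the three-argument call init(-0.05, 0.05, shape) used above, so in practice a uniform initializer is presumably passed in. An illustrative call (the vocabulary and embedding file below are placeholders):

word_dict = {'<unk>': 0, 'the': 1, 'cat': 2}                    # illustrative vocabulary
embedding_matrix = gen_embeddings(word_dict, embed_dim=100,
                                  in_file='glove.6B.100d.txt',  # placeholder embedding file
                                  init=np.random.uniform)       # matches init(low, high, size)
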
Project: RC-experiments | Author: cairoHy
def gen_char_vocab(data_file, tokenizer=default_tokenizer, old_counter=None):
        """
        Generate a character-level vocabulary from the training corpus.
        """
        logger("Creating character dict from data {}.".format(data_file))
        char_counter = old_counter if old_counter else Counter()
        with gfile.FastGFile(data_file) as f:
            for line in f:
                tokens = tokenizer(line.rstrip("\n"))
                char_counter.update([char for word in tokens for char in word])

        # summary statistics
        total_chars = sum(char_counter.values())
        distinct_chars = len(list(char_counter))

        logger("STATISTICS" + "-" * 20)
        logger("Total characters: " + str(total_chars))
        logger("Total distinct characters: " + str(distinct_chars))
        return char_counter
Project: icyface_api | Author: bupticybee
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
Project: document-classification | Author: nagelflorian
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: inception-face-shape-classifier | Author: adonistio
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: PlantImageRecognition | Author: HeavenMin
def createBottleneckFile(bottleneckPath, imageLists, labelName, index,
                         imageDir, category, sess, jpegDataTensor,
                         bottleneckTensor):
    print('Create bottleneck at ' + bottleneckPath)
    imagePath = getImagePath(imageLists, labelName, index,
                             imageDir, category)
    if not gfile.Exists(imagePath):
        tf.logging.fatal('File not exist %s', imagePath)
    imageData = gfile.FastGFile(imagePath, 'rb').read()
    try:
        bottleneckValues = runBottleneckOnImage(sess,
                           imageData, jpegDataTensor, bottleneckTensor)
    except:
        raise RuntimeError('Error during processing file %s' % imagePath)

    bottleneckString = ','.join(str(x) for x in bottleneckValues)
    with open(bottleneckPath, 'w') as f:
        f.write(bottleneckString)
Project: PlantImageRecognition | Author: HeavenMin
def getRandomDistortedBottlenecks(sess, imageLists, num, category, imageDir,
                                  inputJpegTensor, distortedImage,
                                  resizedInputTensor, bottleneckTensor):
    classCount = len(imageLists.keys())
    bottlenecks = []
    groundTruths = []
    for _ in range(num):
        labelIndex = random.randrange(classCount)
        labelName = list(imageLists.keys())[labelIndex]
        imageIndex = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
        imagePath = getImagePath(imageLists, labelName, imageIndex,
                                 imageDir, category)
        if not gfile.Exists(imagePath):
            tf.logging.fatal('File not exist %s', imagePath)
        jpegData = gfile.FastGFile(imagePath, 'rb').read()
        distortedImageData = sess.run(distortedImage, {inputJpegTensor: jpegData})
        bottleneck = runBottleneckOnImage(sess, distortedImageData,
                                          resizedInputTensor, bottleneckTensor)
        groundTruth = np.zeros(classCount, dtype = np.float32)
        groundTruth[labelIndex] = 1.0
        bottlenecks.append(bottleneck)
        groundTruths.append(groundTruth)
    return bottlenecks, groundTruths
Project: PlantImageRecognition | Author: HeavenMin
def createBottleneckFile(bottleneckPath, imageLists, labelName, index,
                         imageDir, category, sess, jpegDataTensor,
                         bottleneckTensor):
    print('Create bottleneck at ' + bottleneckPath)
    imagePath = getImagePath(imageLists, labelName, index,
                             imageDir, category)
    if not gfile.Exists(imagePath):
        tf.logging.fatal('File not exist %s', imagePath)
    imageData = gfile.FastGFile(imagePath, 'rb').read()
    try:
        bottleneckValues = runBottleneckOnImage(sess,
                           imageData, jpegDataTensor, bottleneckTensor)
    except:
        raise RuntimeError('Error during processing file %s' % imagePath)

    bottleneckString = ','.join(str(x) for x in bottleneckValues)
    with open(bottleneckPath, 'w') as f:
        f.write(bottleneckString)
Project: PlantImageRecognition | Author: HeavenMin
def createBottleneckFile(bottleneckPath, imageLists, labelName, index,
                         imageDir, category, sess, jpegDataTensor,
                         bottleneckTensor):
    print('Create bottleneck at ' + bottleneckPath)
    imagePath = getImagePath(imageLists, labelName, index,
                             imageDir, category)
    if not gfile.Exists(imagePath):
        tf.logging.fatal('File not exist %s', imagePath)
    imageData = gfile.FastGFile(imagePath, 'rb').read()
    try:
        bottleneckValues = runBottleneckOnImage(sess,
                           imageData, jpegDataTensor, bottleneckTensor)
    except:
        raise RuntimeError('Error during processing file %s' % imagePath)

    bottleneckString = ','.join(str(x) for x in bottleneckValues)
    with open(bottleneckPath, 'w') as f:
        f.write(bottleneckString)
Project: PlantImageRecognition | Author: HeavenMin
def getRandomDistortedBottlenecks(sess, imageLists, num, category, imageDir,
                                  inputJpegTensor, distortedImage,
                                  resizedInputTensor, bottleneckTensor):
    classCount = len(imageLists.keys())
    bottlenecks = []
    groundTruths = []
    for _ in range(num):
        labelIndex = random.randrange(classCount)
        labelName = list(imageLists.keys())[labelIndex]
        imageIndex = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
        imagePath = getImagePath(imageLists, labelName, imageIndex,
                                 imageDir, category)
        if not gfile.Exists(imagePath):
            tf.logging.fatal('File not exist %s', imagePath)
        jpegData = gfile.FastGFile(imagePath, 'rb').read()
        distortedImageData = sess.run(distortedImage, {inputJpegTensor: jpegData})
        bottleneck = runBottleneckOnImage(sess, distortedImageData,
                                          resizedInputTensor, bottleneckTensor)
        groundTruth = np.zeros(classCount, dtype = np.float32)
        groundTruth[labelIndex] = 1.0
        bottlenecks.append(bottleneck)
        groundTruths.append(groundTruth)
    return bottlenecks, groundTruths
Project: PlantImageRecognition | Author: HeavenMin
def createBottleneckFile(bottleneckPath, imageLists, labelName, index,
                         imageDir, category, sess, jpegDataTensor,
                         bottleneckTensor):
    print('Create bottleneck at ' + bottleneckPath)
    imagePath = getImagePath(imageLists, labelName, index,
                             imageDir, category)
    if not gfile.Exists(imagePath):
        tf.logging.fatal('File not exist %s', imagePath)
    imageData = gfile.FastGFile(imagePath, 'rb').read()
    try:
        bottleneckValues = runBottleneckOnImage(sess,
                           imageData, jpegDataTensor, bottleneckTensor)
    except:
        raise RuntimeError('Error during processing file %s' % imagePath)

    bottleneckString = ','.join(str(x) for x in bottleneckValues)
    with open(bottleneckPath, 'w') as f:
        f.write(bottleneckString)
Project: PlantImageRecognition | Author: HeavenMin
def createBottleneckFile(bottleneckPath, imageLists, labelName, index,
                         imageDir, category, sess, jpegDataTensor,
                         bottleneckTensor):
    print('Create bottleneck at ' + bottleneckPath)
    imagePath = getImagePath(imageLists, labelName, index,
                             imageDir, category)
    if not gfile.Exists(imagePath):
        tf.logging.fatal('File not exist %s', imagePath)
    imageData = gfile.FastGFile(imagePath, 'rb').read()
    try:
        bottleneckValues = runBottleneckOnImage(sess,
                           imageData, jpegDataTensor, bottleneckTensor)
    except:
        raise RuntimeError('Error during processing file %s' % imagePath)

    bottleneckString = ','.join(str(x) for x in bottleneckValues)
    with open(bottleneckPath, 'w') as f:
        f.write(bottleneckString)
Project: PlantImageRecognition | Author: HeavenMin
def getRandomDistortedBottlenecks(sess, imageLists, num, category, imageDir,
                                  inputJpegTensor, distortedImage,
                                  resizedInputTensor, bottleneckTensor):
    classCount = len(imageLists.keys())
    bottlenecks = []
    groundTruths = []
    for _ in range(num):
        labelIndex = random.randrange(classCount)
        labelName = list(imageLists.keys())[labelIndex]
        imageIndex = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
        imagePath = getImagePath(imageLists, labelName, imageIndex,
                                 imageDir, category)
        if not gfile.Exists(imagePath):
            tf.logging.fatal('File not exist %s', imagePath)
        jpegData = gfile.FastGFile(imagePath, 'rb').read()
        distortedImageData = sess.run(distortedImage, {inputJpegTensor: jpegData})
        bottleneck = runBottleneckOnImage(sess, distortedImageData,
                                          resizedInputTensor, bottleneckTensor)
        groundTruth = np.zeros(classCount, dtype = np.float32)
        groundTruth[labelIndex] = 1.0
        bottlenecks.append(bottleneck)
        groundTruths.append(groundTruth)
    return bottlenecks, groundTruths
Project: tensorflow-for-poets-2 | Author: googlecodelabs
def create_model_graph(model_info):
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Args:
    model_info: Dictionary containing information about the model architecture.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
    with gfile.FastGFile(model_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
          graph_def,
          name='',
          return_elements=[
              model_info['bottleneck_tensor_name'],
              model_info['resized_input_tensor_name'],
          ]))
  return graph, bottleneck_tensor, resized_input_tensor
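
For context, model_info is a dictionary built elsewhere in retrain.py. An illustrative value for the classic Inception V3 case (tensor names as published for classify_image_graph_def.pb; treat them as an assumption if a different architecture is used):

model_info = {
    'model_file_name': 'classify_image_graph_def.pb',
    'bottleneck_tensor_name': 'pool_3/_reshape:0',
    'resized_input_tensor_name': 'Mul:0',
}
graph, bottleneck_tensor, resized_input_tensor = create_model_graph(model_info)
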
Project: tensorflow-for-poets-2 | Author: googlecodelabs
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  tf.logging.info('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: kaggle-distracted-drivers-inceptionv3 | Author: ckleban
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: facenet | Author: davidsandberg
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
Project: Tensorflow-Image-Classification | Author: AxelAli
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: MachineLearningGoogleSeries | Author: TheCoinTosser
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: ZOO-Attack | Author: huanzhang12
def create_model_graph(model_info):
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Args:
    model_info: Dictionary containing information about the model architecture.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
    with gfile.FastGFile(model_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
          graph_def,
          name='',
          return_elements=[
              model_info['bottleneck_tensor_name'],
              model_info['resized_input_tensor_name'],
          ]))
  return graph, bottleneck_tensor, resized_input_tensor
Project: ZOO-Attack | Author: huanzhang12
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  tf.logging.info('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: tensorflow-image-classifier | Author: damianmoore
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: ctrl-f-vision | Author: osmanio2
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-video-classifier | Author: burliEnterprises
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-video-classifier | Author: burliEnterprises
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except:
    raise RuntimeError('Error during processing file %s' % image_path)

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: real-time-face-recognition | Author: iwantooxxoox
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    #  or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp,'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
Project: transfer_learning_sound_classification | Author: lukeinator42
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: FaceRecognition | Author: habrman
def load_model(model):
    model_exp = os.path.expanduser(model)
    if (os.path.isfile(model_exp)):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
Project: tensorflow-image-detection | Author: ArunMichaelDsouza
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: tensorflow-image-detection | Author: ArunMichaelDsouza
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except:
    raise RuntimeError('Error during processing file %s' % image_path)

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string)
Project: Multi-label-Inception-net | Author: BartyzalRadek
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        FLAGS.model_dir, 'classify_image_graph_def.pb')
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
Project: yolov2 | Author: datlife
def visualize_graph_in_tfboard(filename, output='./log'):
    with tf.Session() as sess:
        model_filename = filename
        with gfile.FastGFile(model_filename, 'rb') as f:
            data = compat.as_bytes(f.read())
            sm = saved_model_pb2.SavedModel()
            sm.ParseFromString(data)
            if 1 != len(sm.meta_graphs):
                print('More than one graph found. Not sure which to write')
                sys.exit(1)

            g_in = tf.import_graph_def(sm.meta_graphs[0].graph_def)

        train_writer = tf.summary.FileWriter(output)
        train_writer.add_graph(sess.graph)
        print("Please execute `tensorboard --logdir {}` to view graph".format(output))
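
A hypothetical invocation (the path is a placeholder); note that the function parses a SavedModel protobuf, not a bare GraphDef:

visualize_graph_in_tfboard('saved_model.pb', output='./log')  # placeholder path
# Then run: tensorboard --logdir ./log
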
Project: deligan | Author: val-iisc
def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    print 'Loading graph...'
    with tf.Session() as sess:
        with gfile.FastGFile(model, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')
    return sess.graph
Project: attention-sum-reader | Author: cairoHy
def gen_vocab(data_file, tokenizer=default_tokenizer, old_counter=None, max_count=None):
    """
    Generate a word-level vocabulary from the training corpus.
    """
    logging.info("Creating word_dict from data %s" % data_file)
    word_counter = old_counter if old_counter else Counter()
    counter = 0
    with gfile.FastGFile(data_file) as f:
        for line in f:
            counter += 1
            if max_count and counter > max_count:
                break
            tokens = tokenizer(line.rstrip('\n'))
            word_counter.update(tokens)
            if counter % 100000 == 0:
                logging.info("Process line %d Done." % counter)

    # summary statistics
    total_words = sum(word_counter.values())
    distinct_words = len(list(word_counter))

    logging.info("STATISTICS" + "-" * 20)
    logging.info("Total words: " + str(total_words))
    logging.info("Total distinct words: " + str(distinct_words))

    return word_counter
Project: attention-sum-reader | Author: cairoHy
def save_vocab(word_counter, vocab_file, max_vocab_num=None):
    with gfile.FastGFile(vocab_file, "w") as f:
        for word in _START_VOCAB:
            f.write(word + "\n")
        for word in list(map(lambda x: x[0], word_counter.most_common(max_vocab_num))):
            f.write(word + "\n")
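
A hedged end-to-end sketch combining the two helpers above (file paths are placeholders; _START_VOCAB and default_tokenizer are assumed to be defined in the same module):

word_counter = gen_vocab('train.txt', max_count=1000000)    # placeholder corpus path
save_vocab(word_counter, 'vocab.txt', max_vocab_num=50000)  # keep the 50,000 most frequent words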