Python tensorflow.contrib.slim module: get_model_variables() example source code

We extracted the following 42 code examples from open-source Python projects to illustrate how to use tensorflow.contrib.slim.get_model_variables().
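
Before the project examples, here is a minimal, self-contained sketch of what slim.get_model_variables() returns (assuming TensorFlow 1.x, where tf.contrib is available; the layer scopes are illustrative, not taken from any project below):

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
net = slim.fully_connected(slim.flatten(net), 10, scope='fc1')

# Every slim layer registers its weights and biases as "model variables".
for v in slim.get_model_variables():
    print('name = {}, shape = {}'.format(v.name, v.get_shape()))

# The optional scope argument filters by variable-name prefix, e.g. only conv1:
conv1_vars = slim.get_model_variables('conv1')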

Project: yt8m    Author: forwchen
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: mlc2017-online    Author: machine-learning-challenge
def build_prediction_graph(self, serialized_examples):
    model_input_raw, labels_batch = (
        self.reader.prepare_serialized_examples(serialized_examples))

    model_input = model_input_raw

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_classes=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      prediction, index = tf.nn.top_k(predictions, 1)
    return prediction, index
Project: mlc2017-online    Author: machine-learning-challenge
def build_prediction_graph(self, serialized_examples):
    image_id, model_input_raw, labels_batch = (
        self.reader.prepare_serialized_examples(serialized_examples))

    model_input = model_input_raw

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_classes=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      prediction, index = tf.nn.top_k(predictions, 1)
    return image_id, prediction, index
Project: youtube-8m    Author: google
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: Video-Classification    Author: boyaolin
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: DenseHumanBodyCorrespondences    Author: halimacc
def classify(self, model_range, seg_range, feature_lr, classifier_lr):
        feat_opt = tf.train.AdamOptimizer(feature_lr)
        clas_opt = tf.train.AdamOptimizer(classifier_lr)
        for model in model_range:
            for seg in seg_range:
                with tf.variable_scope('classifier-{}-{}'.format(model, seg)):
                    self.preds[(model, seg)] = slim.conv2d(self.feature, 500, [1, 1])
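                    # slim.conv2d above just registered its weights and biases,
                    # so the last two model variables belong to this classifier head.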
                    self.clas_vars[(model, seg)] = slim.get_model_variables()[-2:]

                with tf.variable_scope('losses-{}-{}'.format(model, seg)):
                    self.losses[(model, seg)] = self.loss(self.labels, self.preds[(model, seg)])
                    grad = tf.gradients(self.losses[(model, seg)], self.feat_vars + self.clas_vars[(model, seg)])
                    train_op_feat = feat_opt.apply_gradients(zip(grad[:-2], self.feat_vars))
                    train_op_clas = clas_opt.apply_gradients(zip(grad[-2:], self.clas_vars[(model, seg)]))
                    self.train_ops[(model, seg)] = tf.group(train_op_feat, train_op_clas)
        return self.losses, self.train_ops
Project: Youtube-8M-WILLOW    Author: antoine77340
def build_prediction_graph(self, serialized_examples):    

    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.name_scope("model"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions, 
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: Y8M    Author: mpekalski
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: Y8M    Author: mpekalski
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      layers_keep_probs = tf.Variable([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=tf.float32, name="layers_keep_probs")
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False, 
          layers_keep_probs=layers_keep_probs)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: Y8M    Author: mpekalski
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      layers_keep_probs = tf.Variable([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=tf.float32, name="layers_keep_probs")
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False, 
          layers_keep_probs=layers_keep_probs)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: deepmodels    Author: learningsociety
def save_model_for_prediction(self, save_ckpt_fn, vars_to_save=None):
    """Save model data only needed for prediction.

    Args:
      save_ckpt_fn: checkpoint file to save.
      vars_to_save: a list of variables to save.
    """
    if vars_to_save is None:
      vars_to_save = slim.get_model_variables()
      vars_restore_to_exclude = []
      for scope in self.dm_model.restore_scope_exclude:
        vars_restore_to_exclude.extend(slim.get_variables(scope))
      # Drop variables that were excluded from restoring.
      vars_to_save = [
          v for v in vars_to_save if v not in vars_restore_to_exclude
      ]
    base_model.save_model(save_ckpt_fn, self.sess, vars_to_save)
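
The final call delegates to the project's own base_model.save_model helper. As a hedged sketch, assuming a plain tf.train.Saver suffices, the equivalent logic (with sess standing in for self.sess above) would be:

saver = tf.train.Saver(var_list=vars_to_save)
saver.save(sess, save_ckpt_fn, write_meta_graph=False)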
Project: SSD_tensorflow_VOC    Author: LevinJ
def __add_summaries(self, end_points, learning_rate, total_loss):
        for end_point in end_points:
            x = end_points[end_point]
            tf.summary.histogram('activations/' + end_point, x)
            tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x))
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            tf.summary.scalar('losses/%s' % loss.op.name, loss)
        # Add total_loss to summary.
        tf.summary.scalar('total_loss', total_loss)

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)
        tf.summary.scalar('learning_rate', learning_rate)

        return
Project: SSD_tensorflow_VOC    Author: LevinJ
def __add_summaries(self, end_points, learning_rate, total_loss):
        # Add summaries for end_points (activations).

        for end_point in end_points:
            x = end_points[end_point]
            tf.summary.histogram('activations/' + end_point, x)
            tf.summary.scalar('sparsity/' + end_point,
                                            tf.nn.zero_fraction(x))
        # Add summaries for losses and extra losses.

        tf.summary.scalar('total_loss', total_loss)
        for loss in tf.get_collection('EXTRA_LOSSES'):
            tf.summary.scalar(loss.op.name, loss)

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        return
Project: Youtube8mdataset_kagglechallenge    Author: jasonlee27
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: youtube    Author: taufikxu
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: google_ml_challenge    Author: SSUHan
def build_prediction_graph(self, serialized_examples):
    model_input_raw, labels_batch = (
        self.reader.prepare_serialized_examples(serialized_examples))

    model_input = model_input_raw

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_classes=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      prediction, index = tf.nn.top_k(predictions, 1)
    return prediction, index
Project: google_ml_challenge    Author: SSUHan
def build_prediction_graph(self, serialized_examples):
    image_id, model_input_raw, labels_batch = (
        self.reader.prepare_serialized_examples(serialized_examples))

    model_input = model_input_raw

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_classes=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      prediction, index = tf.nn.top_k(predictions, 1)
    return image_id, prediction, index
Project: GestureRecognition    Author: gkchai
def create_init_fn_to_restore(master_checkpoint, train_dir):
    """Creates an init operations to restore weights from various checkpoints.
    master_checkpoint is path to a checkpoint which contains all weights for
    the whole model.
    """
    if master_checkpoint is None:
        return None

    # Warn the user if a checkpoint exists in the train_dir. Then we'll be
    # ignoring the checkpoint path anyway.
    if tf.train.latest_checkpoint(train_dir):
        tf.logging.info(
            'Ignoring --checkpoint_path because a checkpoint already exists in %s'
            % train_dir)
        return None

    if tf.gfile.IsDirectory(master_checkpoint):
        checkpoint_path = tf.train.latest_checkpoint(master_checkpoint)
    else:
        checkpoint_path = master_checkpoint

    tf.logging.info('Fine-tuning from %s' % checkpoint_path)
    return slim.assign_from_checkpoint_fn(checkpoint_path, slim.get_model_variables())
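
The returned init_fn expects a live session. A hypothetical usage sketch (the checkpoint and train_dir paths are placeholders, not from the project):

init_fn = create_init_fn_to_restore('/tmp/master.ckpt', '/tmp/train_dir')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if init_fn is not None:
        init_fn(sess)  # weights from the master checkpoint are now restored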
Project: kaggle-youtube-8m    Author: liufuyang
def build_prediction_graph(self, serialized_examples):    

    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.name_scope("model"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions, 
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: tutorial_mnist    Author: machine-learning-challenge
def build_prediction_graph(self, serialized_examples):
    model_input_raw, labels_batch = (
        self.reader.prepare_serialized_examples(serialized_examples))

    model_input = model_input_raw

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_classes=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      prediction, index = tf.nn.top_k(predictions, 1)
    return prediction, index
Project: u8m_test    Author: hxkk
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: youtube-8m    Author: Tsingularity
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False,
          keep_prob=1.0)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: youtube-8m    Author: Tsingularity
def build_prediction_graph(self, serialized_examples):
    video_id, model_input_raw, labels_batch, num_frames = (
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: SSD_tensorflow_VOC    Author: LevinJ
def disp_model_info():
    with tf.Graph().as_default():
        # Dummy placeholders for arbitrary number of 1d inputs and outputs

        inputs = tf.placeholder(tf.float32, shape=(None, 1))
        outputs = tf.placeholder(tf.float32, shape=(None, 1))

        # Build model
        predictions, end_points = regression_model(inputs)

        # Print name and shape of each tensor.
        print("Layers")
        for k, v in end_points.items():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))

        # Print name and shape of parameter nodes  (values not yet initialized)
        print("\n")
        print("Parameters")
        for v in slim.get_model_variables():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))

        print("\n")
        print("Local Parameters")
        for v in slim.get_local_variables():
            print('name = {}, shape = {}'.format(v.name, v.get_shape()))
    return
Project: SSD_tensorflow_VOC    Author: LevinJ
def use_inceptionv4(self):
        image_size = inception.inception_v4.default_image_size
        img_path = "../../data/misec_images/EnglishCockerSpaniel_simon.jpg"
        checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"

        with tf.Graph().as_default():

            image_string = tf.read_file(img_path)
            image = tf.image.decode_jpeg(image_string, channels=3)
            processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
            processed_images  = tf.expand_dims(processed_image, 0)

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(processed_images, num_classes=1001, is_training=False)
            probabilities = tf.nn.softmax(logits)

            init_fn = slim.assign_from_checkpoint_fn(
                checkpoint_path,
                slim.get_model_variables('InceptionV4'))

            with tf.Session() as sess:
                init_fn(sess)
                np_image, probabilities = sess.run([image, probabilities])
                probabilities = probabilities[0, 0:]
                sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
                self.disp_names(sorted_inds,probabilities)

            plt.figure()
            plt.imshow(np_image.astype(np.uint8))
            plt.axis('off')
            plt.title(img_path)
            plt.show()



        return
Project: SSD_tensorflow_VOC    Author: LevinJ
def use_vgg16(self):

        with tf.Graph().as_default():
            image_size = vgg.vgg_16.default_image_size
            img_path = "../../data/misec_images/First_Student_IC_school_bus_202076.jpg"
            checkpoint_path = "../../data/trained_models/vgg16/vgg_16.ckpt"

            image_string = tf.read_file(img_path)
            image = tf.image.decode_jpeg(image_string, channels=3)
            processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
            processed_images  = tf.expand_dims(processed_image, 0)

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(vgg.vgg_arg_scope()):
                # 1000 classes instead of 1001.
                logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
                probabilities = tf.nn.softmax(logits)

                init_fn = slim.assign_from_checkpoint_fn(
                    checkpoint_path,
                    slim.get_model_variables('vgg_16'))

                with tf.Session() as sess:
                    init_fn(sess)
                    np_image, probabilities = sess.run([image, probabilities])
                    probabilities = probabilities[0, 0:]
                    sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
                    self.disp_names(sorted_inds,probabilities,include_background=False)

                plt.figure()
                plt.imshow(np_image.astype(np.uint8))
                plt.axis('off')
                plt.title(img_path)
                plt.show()
        return
Project: SSD_tensorflow_VOC    Author: LevinJ
def _add_variables_summaries(learning_rate):
    summaries = []
    for variable in slim.get_model_variables():
        summaries.append(tf.summary.histogram(variable.op.name, variable))
    summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
    return summaries
Project: convolutional-vqa    Author: paarthneekhara
def create_resnet_model(img_dim):
    pre_image = tf.placeholder(tf.float32, [None, None, 3])
    processed_image = cnn_preprocessing.preprocess_for_eval(pre_image/255.0, img_dim, img_dim)

    images = tf.placeholder(tf.float32, [None, img_dim, img_dim, 3])
    # mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
    # processed_images = images - mean
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        probs, endpoints = resnet_v2.resnet_v2_152(images, num_classes=1001, is_training = False)
        print(endpoints['resnet_v2_152/block4'])

    init_fn = slim.assign_from_checkpoint_fn(
            'Data/CNNModels/resnet_v2_152.ckpt',
            slim.get_model_variables('resnet_v2_152'))

    sess = tf.Session()
    init_fn(sess)

    return {
        'images_placeholder' : images,
        'block4' : endpoints['resnet_v2_152/block4'],
        'session' : sess,
        'processed_image' : processed_image,
        'pre_image' : pre_image,
        'probs' : probs
    }
Project: MobileNet    Author: Zehaos
def _add_variables_summaries(learning_rate):
  summaries = []
  for variable in slim.get_model_variables():
    summaries.append(tf.summary.histogram(variable.op.name, variable))
  summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
  return summaries
Project: multibox    Author: gvanhorn38
def build_finetunable_model(inputs, cfg):

  with slim.arg_scope([slim.conv2d], 
                      activation_fn=tf.nn.relu,
                      normalizer_fn=slim.batch_norm,
                      weights_regularizer=slim.l2_regularizer(0.00004),
                      biases_regularizer=slim.l2_regularizer(0.00004)) as scope:

      batch_norm_params = {
        'decay': cfg.BATCHNORM_MOVING_AVERAGE_DECAY,
        'epsilon': 0.001,
        'variables_collections' : [],
        'is_training' : False
      }
      with slim.arg_scope([slim.conv2d], normalizer_params=batch_norm_params):
        features, _ = model.inception_resnet_v2(inputs, reuse=False, scope='InceptionResnetV2')

      # Save off the original variables (for ease of restoring)
      model_variables = slim.get_model_variables()
      inception_vars = {var.op.name:var for var in model_variables}

      batch_norm_params = {
        'decay': cfg.BATCHNORM_MOVING_AVERAGE_DECAY,
        'epsilon': 0.001,
        'variables_collections' : [tf.GraphKeys.MOVING_AVERAGE_VARIABLES],
        'is_training' : True
      }
      with slim.arg_scope([slim.conv2d], normalizer_params=batch_norm_params):

        # Add on the detection heads
        locs, confs, _ = model.build_detection_heads(features, cfg.NUM_BBOXES_PER_CELL)
        model_variables = slim.get_model_variables()
        detection_vars = {var.op.name:var for var in model_variables if var.op.name not in inception_vars}

  return locs, confs, inception_vars, detection_vars
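
Because inception_vars maps checkpoint variable names to variables, it can be handed directly to tf.train.Saver to restore just the pretrained backbone. A hedged sketch (the checkpoint path is hypothetical):

locs, confs, inception_vars, detection_vars = build_finetunable_model(inputs, cfg)
saver = tf.train.Saver(var_list=inception_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, '/tmp/inception_resnet_v2.ckpt')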
Project: youtube-8m    Author: Tsingularity
def build_prediction_graph(self, serialized_examples):
    # video_id, model_input_raw, labels_batch, num_frames = (
    ### Newly
    video_id, model_input_raw, labels_batch, coarse_labels_batch, num_frames = (
    ###
        self.reader.prepare_serialized_examples(serialized_examples))

    feature_dim = len(model_input_raw.get_shape()) - 1
    model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

    with tf.variable_scope("tower"):
      result = self.model.create_model(
          model_input,
          num_frames=num_frames,
          vocab_size=self.reader.num_classes,
          labels=labels_batch,
          ### Newly
          coarse_labels=coarse_labels_batch,
          ###
          is_training=False)

      for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)

      predictions = result["predictions"]

      top_predictions, top_indices = tf.nn.top_k(predictions,
          _TOP_PREDICTIONS_IN_OUTPUT)
    return video_id, top_indices, top_predictions
Project: DenseHumanBodyCorrespondences    Author: halimacc
def forward(self, input):
        self.depth = input
        conv = self._conv
        upconv = self._upconv
        maxpool = self._maxpool
        with tf.variable_scope('feature'):
            with tf.variable_scope('encoder'):
                conv1 = conv(self.depth, 96, 11, 4)                         # H/4
                pool1 = maxpool(conv1, 3)                                   # H/8
                conv2 = conv(pool1, 256, 5, 1)                              # H/8
                pool2 = maxpool(conv2, 3)                                   # H/16
                conv3 = conv(pool2, 384, 3, 1)                              # H/16
                conv4 = conv(conv3, 384, 3, 1)                              # H/16
                conv5 = conv(conv4, 256, 3, 1)                              # H/16
                pool5 = maxpool(conv5, 3)                                   # H/32
                conv6 = conv(pool5, 4096, 1, 1)                             # H/32
                conv7 = conv(conv6, 4096, 1, 1)                             # H/32

            with tf.variable_scope('skips'):
                skip1 = conv1
                skip2 = conv2
                skip3 = conv5

            with tf.variable_scope('decoder'):
                upconv5 = upconv(conv7,  256, 3, 2)                         #H/16
                concat5 = tf.concat([upconv5, skip3], 3)
                iconv5  = conv(concat5,  256, 3, 1)

                upconv4 = upconv(iconv5,  256, 3, 2)                        #H/8
                concat4 = tf.concat([upconv4, skip2], 3)
                iconv4  = conv(concat4,  256, 3, 1)

                upconv3 = upconv(iconv4,  96, 3, 2)                         #H/4
                concat3 = tf.concat([upconv3, skip1], 3)
                iconv3  = conv(concat3,  96, 3, 1)

                upconv2 = upconv(iconv3,  48, 3, 2)                         #H/2
                upconv1 = upconv(upconv2,  16, 3, 2)                        #H/1
                self.feature = upconv1

        self.feat_vars = slim.get_model_variables()
        return self.feature
Project: bgsCNN    Author: SaoYan
def train(self):
        img_size = [self.image_height, self.image_width, self.image_depth]
        train_batch = tf.train.shuffle_batch([read_tfrecord(self.train_file, img_size)],
                    batch_size = self.train_batch_size,
                    capacity = 3000,
                    num_threads = 2,
                    min_after_dequeue = 1000)
        test_batch = tf.train.shuffle_batch([read_tfrecord(self.test_file, img_size)],
                    batch_size = self.test_batch_size,
                    capacity = 500,
                    num_threads = 2,
                    min_after_dequeue = 300)
        init = tf.global_variables_initializer()
        init_fn = slim.assign_from_checkpoint_fn("resnet_v2_50.ckpt", slim.get_model_variables('resnet_v2'))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(init)
            init_fn(sess)
            train_writer = tf.summary.FileWriter(self.log_dir + "/train", sess.graph)
            test_writer  = tf.summary.FileWriter(self.log_dir + "/test", sess.graph)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            inputs_test, outputs_gt_test = build_img_pair(sess.run(test_batch))
            for iter in range(self.max_iteration):
                inputs_train, outputs_gt_train = build_img_pair(sess.run(train_batch))
                # train with dynamic learning rate
                if iter <= 500:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-3, self.batch_size:self.train_batch_size})
                elif iter <= self.max_iteration - 1000:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:0.5e-3, self.batch_size:self.train_batch_size})
                else:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-4, self.batch_size:self.train_batch_size})
                # print training loss and test loss
                if iter%10 == 0:
                    summary_train = sess.run(self.summary, {self.input_data:inputs_train, self.gt:outputs_gt_train,
                                             self.batch_size:self.train_batch_size})
                    train_writer.add_summary(summary_train, iter)
                    train_writer.flush()
                    summary_test = sess.run(self.summary, {self.input_data:inputs_test, self.gt:outputs_gt_test,
                                             self.batch_size:self.test_batch_size})
                    test_writer.add_summary(summary_test, iter)
                    test_writer.flush()
                # record training loss and test loss
                if iter%10 == 0:
                    train_loss  = self.cross_entropy.eval({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                                    self.batch_size:self.train_batch_size})
                    test_loss   = self.cross_entropy.eval({self.input_data:inputs_test, self.gt:outputs_gt_test,
                                                    self.batch_size:self.test_batch_size})
                    print("iter step %d trainning batch loss %f"%(iter, train_loss))
                    print("iter step %d test loss %f\n"%(iter, test_loss))
                # record model
                if iter%100 == 0:
                    saver.save(sess, self.log_dir + "/model.ckpt", global_step=iter)
            coord.request_stop()
            coord.join(threads)
Project: bgsCNN    Author: SaoYan
def train(self):
        img_size = [self.image_height, self.image_width, self.image_depth]
        train_batch = tf.train.shuffle_batch([read_tfrecord(self.train_file, img_size)],
                    batch_size = self.train_batch_size,
                    capacity = 3000,
                    num_threads = 2,
                    min_after_dequeue = 1000)
        test_batch = tf.train.shuffle_batch([read_tfrecord(self.test_file, img_size)],
                    batch_size = self.test_batch_size,
                    capacity = 500,
                    num_threads = 2,
                    min_after_dequeue = 300)
        init = tf.global_variables_initializer()
        init_fn = slim.assign_from_checkpoint_fn("resnet_v2_50.ckpt", slim.get_model_variables('resnet_v2'))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(init)
            init_fn(sess)
            train_writer = tf.summary.FileWriter(self.log_dir + "/train", sess.graph)
            test_writer  = tf.summary.FileWriter(self.log_dir + "/test", sess.graph)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            inputs_test, outputs_gt_test = build_img_pair(sess.run(test_batch))
            for iter in range(self.max_iteration):
                inputs_train, outputs_gt_train = build_img_pair(sess.run(train_batch))
                # train with dynamic learning rate
                if iter <= 500:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-3, self.batch_size:self.train_batch_size})
                elif iter <= self.max_iteration - 1000:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:0.5e-3, self.batch_size:self.train_batch_size})
                else:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-4, self.batch_size:self.train_batch_size})
                # print training loss and test loss
                if iter%10 == 0:
                    summary_train = sess.run(self.summary, {self.input_data:inputs_train, self.gt:outputs_gt_train,
                                             self.batch_size:self.train_batch_size})
                    train_writer.add_summary(summary_train, iter)
                    train_writer.flush()
                    summary_test = sess.run(self.summary, {self.input_data:inputs_test, self.gt:outputs_gt_test,
                                             self.batch_size:self.test_batch_size})
                    test_writer.add_summary(summary_test, iter)
                    test_writer.flush()
                # record training loss and test loss
                if iter%10 == 0:
                    train_loss  = self.cross_entropy.eval({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                                    self.batch_size:self.train_batch_size})
                    test_loss   = self.cross_entropy.eval({self.input_data:inputs_test, self.gt:outputs_gt_test,
                                                    self.batch_size:self.test_batch_size})
                    print("iter step %d trainning batch loss %f"%(iter, train_loss))
                    print("iter step %d test loss %f\n"%(iter, test_loss))
                # record model
                if iter%100 == 0:
                    saver.save(sess, self.log_dir + "/model.ckpt", global_step=iter)
            coord.request_stop()
            coord.join(threads)
Project: bgsCNN    Author: SaoYan
def train(self):
        img_size = [self.image_height, self.image_width, self.image_depth]
        train_batch = tf.train.shuffle_batch([read_tfrecord(self.train_file, img_size)],
                    batch_size = self.train_batch_size,
                    capacity = 2000,
                    num_threads = 2,
                    min_after_dequeue = 1000)
        test_batch = tf.train.shuffle_batch([read_tfrecord(self.test_file, img_size)],
                    batch_size = self.test_batch_size,
                    capacity = 500,
                    num_threads = 2,
                    min_after_dequeue = 300)
        init = tf.global_variables_initializer()
        init_fn = slim.assign_from_checkpoint_fn("vgg_16.ckpt", slim.get_model_variables('vgg_16'))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(init)
            init_fn(sess)
            train_writer = tf.summary.FileWriter(self.log_dir + "/train", sess.graph)
            test_writer  = tf.summary.FileWriter(self.log_dir + "/test", sess.graph)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            inputs_test, outputs_gt_test = build_img_pair(sess.run(test_batch))
            for iter in range(self.max_iteration):
                inputs_train, outputs_gt_train = build_img_pair(sess.run(train_batch))
                # train with dynamic learning rate
                if iter <= 500:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train, self.is_training:True,
                                    self.learning_rate:1e-4})
                elif iter <= self.max_iteration - 1000:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train, self.is_training:True,
                                    self.learning_rate:0.5e-4})
                else:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train, self.is_training:True,
                                    self.learning_rate:1e-5})
                # print training loss and test loss
                if iter%10 == 0:
                    summary_train = sess.run(self.summary, {self.input_data:inputs_train, self.gt:outputs_gt_train,
                                             self.is_training:False})
                    train_writer.add_summary(summary_train, iter)
                    train_writer.flush()
                    summary_test = sess.run(self.summary, {self.input_data:inputs_test, self.gt:outputs_gt_test,
                                            self.is_training:False})
                    test_writer.add_summary(summary_test, iter)
                    test_writer.flush()
                # record training loss and test loss
                if iter%10 == 0:
                    train_loss  = self.cross_entropy.eval({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                                    self.is_training:False})
                    test_loss   = self.cross_entropy.eval({self.input_data:inputs_test, self.gt:outputs_gt_test,
                                                    self.is_training:False})
                    print("iter step %d trainning batch loss %f"%(iter, train_loss))
                    print("iter step %d test loss %f\n"%(iter, test_loss))
                # record model
                if iter%100 == 0:
                    saver.save(sess, self.log_dir + "/model.ckpt", global_step=iter)
            coord.request_stop()
            coord.join(threads)
Project: bgsCNN    Author: SaoYan
def train(self):
        img_size = [self.image_height, self.image_width, self.image_depth]
        train_batch = tf.train.shuffle_batch([read_tfrecord(self.train_file, img_size)],
                    batch_size = self.train_batch_size,
                    capacity = 3000,
                    num_threads = 2,
                    min_after_dequeue = 1000)
        test_batch = tf.train.shuffle_batch([read_tfrecord(self.test_file, img_size)],
                    batch_size = self.test_batch_size,
                    capacity = 500,
                    num_threads = 2,
                    min_after_dequeue = 300)
        init = tf.global_variables_initializer()
        init_fn = slim.assign_from_checkpoint_fn("resnet_v2_50.ckpt", slim.get_model_variables('resnet_v2'))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(init)
            init_fn(sess)
            train_writer = tf.summary.FileWriter(self.log_dir + "/train", sess.graph)
            test_writer  = tf.summary.FileWriter(self.log_dir + "/test", sess.graph)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            inputs_test, outputs_gt_test = build_img_pair(sess.run(test_batch))
            for iter in range(self.max_iteration):
                inputs_train, outputs_gt_train = build_img_pair(sess.run(train_batch))
                # train with dynamic learning rate
                if iter <= 500:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-3, self.batch_size:self.train_batch_size})
                elif iter <= self.max_iteration - 1000:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:0.5e-3, self.batch_size:self.train_batch_size})
                else:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-4, self.batch_size:self.train_batch_size})
                # print training loss and test loss
                if iter%10 == 0:
                    summary_train = sess.run(self.summary, {self.input_data:inputs_train, self.gt:outputs_gt_train,
                                             self.batch_size:self.train_batch_size})
                    train_writer.add_summary(summary_train, iter)
                    train_writer.flush()
                    summary_test = sess.run(self.summary, {self.input_data:inputs_test, self.gt:outputs_gt_test,
                                             self.batch_size:self.test_batch_size})
                    test_writer.add_summary(summary_test, iter)
                    test_writer.flush()
                # record training loss and test loss
                if iter%10 == 0:
                    train_loss  = self.cross_entropy.eval({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                                    self.batch_size:self.train_batch_size})
                    test_loss   = self.cross_entropy.eval({self.input_data:inputs_test, self.gt:outputs_gt_test,
                                                    self.batch_size:self.test_batch_size})
                    print("iter step %d trainning batch loss %f"%(iter, train_loss))
                    print("iter step %d test loss %f\n"%(iter, test_loss))
                # record model
                if iter%100 == 0:
                    saver.save(sess, self.log_dir + "/model.ckpt", global_step=iter)
            coord.request_stop()
            coord.join(threads)
Project: bgsCNN    Author: SaoYan
def train(self):
        img_size = [self.image_height, self.image_width, self.image_depth]
        train_batch = tf.train.shuffle_batch([read_tfrecord(self.train_file, img_size)],
                    batch_size = self.train_batch_size,
                    capacity = 2000,
                    num_threads = 2,
                    min_after_dequeue = 1000)
        test_batch = tf.train.shuffle_batch([read_tfrecord(self.test_file, img_size)],
                    batch_size = self.test_batch_size,
                    capacity = 500,
                    num_threads = 2,
                    min_after_dequeue = 300)
        init = tf.global_variables_initializer()
        init_fn = slim.assign_from_checkpoint_fn("vgg_16.ckpt", slim.get_model_variables('vgg_16'))
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(init)
            init_fn(sess)
            train_writer = tf.summary.FileWriter(self.log_dir + "/train", sess.graph)
            test_writer  = tf.summary.FileWriter(self.log_dir + "/test", sess.graph)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            inputs_test, outputs_gt_test = build_img_pair(sess.run(test_batch))
            for iter in range(self.max_iteration):
                inputs_train, outputs_gt_train = build_img_pair(sess.run(train_batch))
                # train with dynamic learning rate
                if iter <= 500:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-4, self.is_training:True})
                elif iter <= self.max_iteration - 1000:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:0.5e-4, self.is_training:True})
                else:
                    self.train_step.run({self.input_data:inputs_train, self.gt:outputs_gt_train,
                                    self.learning_rate:1e-5, self.is_training:True})
                # print training loss and test loss
                if iter%10 == 0:
                    summary_train = sess.run(self.summary, {self.input_data:inputs_train, self.gt:outputs_gt_train, self.is_training:False})
                    train_writer.add_summary(summary_train, iter)
                    train_writer.flush()
                    summary_test = sess.run(self.summary, {self.input_data:inputs_test, self.gt:outputs_gt_test, self.is_training:False})
                    test_writer.add_summary(summary_test, iter)
                    test_writer.flush()
                # record training loss and test loss
                if iter%10 == 0:
                    train_loss  = self.cross_entropy.eval({self.input_data:inputs_train, self.gt:outputs_gt_train, self.is_training:False})
                    test_loss   = self.cross_entropy.eval({self.input_data:inputs_test, self.gt:outputs_gt_test, self.is_training:False})
                    print("iter step %d trainning batch loss %f"%(iter, train_loss))
                    print("iter step %d test loss %f\n"%(iter, test_loss))
                # record model
                if iter%100 == 0:
                    saver.save(sess, self.log_dir + "/model.ckpt", global_step=iter)
            coord.request_stop()
            coord.join(threads)
Project: SSD_tensorflow_VOC    Author: LevinJ
def __get_init_fn(self):
        """Returns a function run by the chief worker to warm-start the training.

        Note that the init_fn is only run when initializing the model during the very
        first global step.

        Returns:
            An init function run by the supervisor.
        """

        if self.checkpoint_path is None:
            return None

        # Warn the user if a checkpoint exists in the train_dir. Then we'll be
        # ignoring the checkpoint anyway.
        if tf.train.latest_checkpoint(self.train_dir):
            tf.logging.info(
                    'Ignoring --checkpoint_path because a checkpoint already exists in %s'
                    % self.train_dir)
            return None

        exclusions = []
        if self.checkpoint_exclude_scopes:
            exclusions = [scope.strip()
                          for scope in self.checkpoint_exclude_scopes.split(',')]

        # TODO(sguada) variables.filter_variables()
        variables_to_restore = []
        for var in slim.get_model_variables():
            excluded = False
            for exclusion in exclusions:
                if var.op.name.startswith(exclusion):
                    excluded = True
                    break
            if not excluded:
                variables_to_restore.append(var)

        if tf.gfile.IsDirectory(self.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_path)
        else:
            checkpoint_path = self.checkpoint_path

        tf.logging.info('Fine-tuning from %s' % checkpoint_path)

        return slim.assign_from_checkpoint_fn(
                checkpoint_path,
                variables_to_restore,
                ignore_missing_vars=self.ignore_missing_vars)
Project: SSD_tensorflow_VOC    Author: LevinJ
def _get_init_fn():
    """Returns a function run by the chief worker to warm-start the training.

    Note that the init_fn is only run when initializing the model during the very
    first global step.

    Returns:
        An init function run by the supervisor.
    """
    if FLAGS.checkpoint_path is None:
        return None

    # Warn the user if a checkpoint exists in the train_dir. Then we'll be
    # ignoring the checkpoint anyway.
    if tf.train.latest_checkpoint(FLAGS.train_dir):
        tf.logging.info(
                'Ignoring --checkpoint_path because a checkpoint already exists in %s'
                % FLAGS.train_dir)
        return None

    exclusions = []
    if FLAGS.checkpoint_exclude_scopes:
        exclusions = [scope.strip()
                      for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

    # TODO(sguada) variables.filter_variables()
    variables_to_restore = []
    for var in slim.get_model_variables():
        excluded = False
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                excluded = True
                break
        if not excluded:
            variables_to_restore.append(var)

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Fine-tuning from %s' % checkpoint_path)

    return slim.assign_from_checkpoint_fn(
            checkpoint_path,
            variables_to_restore,
            ignore_missing_vars=FLAGS.ignore_missing_vars)
Project: SSD_tensorflow_VOC    Author: LevinJ
def __get_init_fn(self):
        """Returns a function run by the chief worker to warm-start the training.

        Note that the init_fn is only run when initializing the model during the very
        first global step.

        Returns:
            An init function run by the supervisor.
        """  

        if self.checkpoint_path is None:
            return None

        # Warn the user if a checkpoint exists in the train_dir. Then we'll be
        # ignoring the checkpoint anyway.


        if tf.train.latest_checkpoint(self.train_dir):
            tf.logging.info(
                    'Ignoring --checkpoint_path because a checkpoint already exists in %s'
                    % self.train_dir)
            return None

        exclusions = []
        if self.checkpoint_exclude_scopes:
            exclusions = [scope.strip()
                          for scope in self.checkpoint_exclude_scopes.split(',')]

        # TODO(sguada) variables.filter_variables()
        variables_to_restore = []
        all_variables = slim.get_model_variables()
        if self.fine_tune_vgg16:
            global_step = slim.get_or_create_global_step()
            all_variables.append(global_step)
        for var in all_variables:
            excluded = False

            for exclusion in exclusions:
                if var.op.name.startswith(exclusion):
                    excluded = True
                    break
            if not excluded:
                variables_to_restore.append(var)

        if tf.gfile.IsDirectory(self.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_path)
        else:
            checkpoint_path = self.checkpoint_path

        tf.logging.info('Fine-tuning from %s' % checkpoint_path)

        return slim.assign_from_checkpoint_fn(
                checkpoint_path,
                variables_to_restore,
                ignore_missing_vars=self.ignore_missing_vars)
Project: MobileNet    Author: Zehaos
def _get_init_fn():
  """Returns a function run by the chief worker to warm-start the training.

  Note that the init_fn is only run when initializing the model during the very
  first global step.

  Returns:
    An init function run by the supervisor.
  """
  if FLAGS.checkpoint_path is None:
    return None

  # Warn the user if a checkpoint exists in the train_dir. Then we'll be
  # ignoring the checkpoint anyway.
  if tf.train.latest_checkpoint(FLAGS.train_dir):
    tf.logging.info(
      'Ignoring --checkpoint_path because a checkpoint already exists in %s'
      % FLAGS.train_dir)
    return None

  exclusions = []
  if FLAGS.checkpoint_exclude_scopes:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

  # TODO(sguada) variables.filter_variables()
  variables_to_restore = []
  for var in slim.get_model_variables():
    excluded = False
    for exclusion in exclusions:
      if var.op.name.startswith(exclusion):
        excluded = True
        break
    if not excluded:
      variables_to_restore.append(var)

  if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
  else:
    checkpoint_path = FLAGS.checkpoint_path

  tf.logging.info('Fine-tuning from %s' % checkpoint_path)

  return slim.assign_from_checkpoint_fn(
    checkpoint_path,
    variables_to_restore,
    ignore_missing_vars=FLAGS.ignore_missing_vars)
Project: sact    Author: mfigurnov
def export_to_h5(checkpoint_dir, export_path, images, end_points, num_samples,
                 batch_size, sact):
  """Exports ponder cost maps and other useful info to an HDF5 file."""
  output_file = h5py.File(export_path, 'w')

  output_file.attrs['block_scopes'] = end_points['block_scopes']
  keys_to_tensors = {}
  for block_scope in end_points['block_scopes']:
    for k in ('{}/ponder_cost'.format(block_scope),
              '{}/num_units'.format(block_scope),
              '{}/halting_distribution'.format(block_scope),
              '{}/flops'.format(block_scope)):
      keys_to_tensors[k] = end_points[k]
  keys_to_tensors['images'] = images
  keys_to_tensors['flops'] = end_points['flops']

  if sact:
    keys_to_tensors['ponder_cost_map'] = sact_map(end_points, 'ponder_cost')
    keys_to_tensors['num_units_map'] = sact_map(end_points, 'num_units')

  keys_to_datasets = {}
  for key, tensor in keys_to_tensors.items():
    sh = tensor.get_shape().as_list()
    sh[0] = num_samples
    print(key, sh)
    keys_to_datasets[key] = output_file.create_dataset(
        key, sh, compression='lzf')

  variables_to_restore = slim.get_model_variables()
  checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
  assert checkpoint_path is not None
  init_fn = slim.assign_from_checkpoint_fn(checkpoint_path,
                                           variables_to_restore)

  sv = tf.train.Supervisor(
      graph=tf.get_default_graph(),
      logdir=None,
      summary_op=None,
      summary_writer=None,
      global_step=None,
      saver=None)

  assert num_samples % batch_size == 0
  num_batches = num_samples // batch_size

  with sv.managed_session('', start_standard_services=False) as sess:
    init_fn(sess)
    sv.start_queue_runners(sess)

    for i in range(num_batches):
      tf.logging.info('Evaluating batch %d/%d', i + 1, num_batches)
      end_points_out = sess.run(keys_to_tensors)
      for key, dataset in keys_to_datasets.items():
        dataset[i * batch_size:(i + 1) * batch_size, ...] = end_points_out[key]
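
Taken together, the 42 examples reduce to two recurring idioms: adding a histogram summary for every model variable, and restoring a filtered subset of model variables from a checkpoint. A distilled sketch of the restore idiom (the checkpoint path and excluded scope are hypothetical):

import tensorflow as tf
import tensorflow.contrib.slim as slim

def make_restore_fn(checkpoint_path, exclude_scopes=()):
    # Keep every model variable whose name does not start with an excluded scope.
    variables_to_restore = [
        v for v in slim.get_model_variables()
        if not any(v.op.name.startswith(s) for s in exclude_scopes)
    ]
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore,
                                          ignore_missing_vars=True)

init_fn = make_restore_fn('/tmp/vgg_16.ckpt', exclude_scopes=('vgg_16/fc8',))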