Python tensorflow.contrib.slim module: get_variables_to_restore() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.slim.get_variables_to_restore().

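Before the project excerpts, a minimal self-contained sketch of the API may help. It assumes TensorFlow 1.x with tf.contrib.slim available; the toy network, scope names, and checkpoint path are placeholders invented for illustration. get_variables_to_restore() returns the model variables of the current graph, optionally filtered by include/exclude lists of scope-name prefixes, and the filtered list is typically handed to slim.assign_from_checkpoint_fn:

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Toy stand-in network; 'conv1' and 'logits' are illustrative scope names.
inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
logits = slim.fully_connected(slim.flatten(net), 10, scope='logits')

# The usual fine-tuning pattern: restore everything except the new head.
vars_to_restore = slim.get_variables_to_restore(exclude=['logits'])

# Alternatively, restrict restoring to particular scopes.
conv_vars = slim.get_variables_to_restore(include=['conv1'])

# Build an init function from a checkpoint and run it inside a session
# ('/path/to/model.ckpt' is a placeholder path, not a real file).
init_fn = slim.assign_from_checkpoint_fn('/path/to/model.ckpt', vars_to_restore)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)

Excluding a scope leaves that scope's freshly initialized variables untouched (for example a re-sized classification head) while everything else is loaded from the checkpoint; most of the examples below rely on exactly this pattern.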
Project: deepmodels    Author: learningsociety
def set_key_vars(self, restore_scope_exclude, train_scopes):
    """Set critical variables for relevant tasks.

    Set vars_to_train and vars_to_restore.
    Called after build_model.

    Args:
      restore_scope_exclude: variable scopes to exclude when restoring.
      train_scopes: variable scopes to train.
    """
    # self.dm_model.use_graph()
    self.vars_to_restore = slim.get_variables_to_restore(
        exclude=restore_scope_exclude)
    self.vars_to_train = []
    if train_scopes is not None:
      for scope in train_scopes:
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
        self.vars_to_train.extend(variables)
    if not self.vars_to_train:
      print "[set_key_vars: info] No variables to train were defined." \
            " Will train ALL variables."
      self.vars_to_train = None
    #base_model.print_variable_names(self.vars_to_train)
Project: deepmodels    Author: learningsociety
def test_model_restore(self):
    # NOTE: the checkpoint must match the network created below (Inception V3);
    # otherwise reader.has_tensor() fails for every restored variable.
    model_path = model_common.get_builtin_net_weights_fn(
        commons.ModelType.INCEPTION_V3)
    reader = pywrap_tensorflow.NewCheckpointReader(model_path)
    inputs = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
    model_common.create_builtin_net(commons.ModelType.INCEPTION_V3, inputs, 78)
    vars_to_restore = slim.get_variables_to_restore(
        exclude=["InceptionV3/Logits"])
    if isinstance(vars_to_restore, (tuple, list)):
      vars_to_restore = {var.op.name: var for var in vars_to_restore}
    for checkpoint_var_name in vars_to_restore:
      var = vars_to_restore[checkpoint_var_name]
      if not reader.has_tensor(checkpoint_var_name):
        raise ValueError('Checkpoint is missing variable [%s]' %
                         checkpoint_var_name)
      var_value = reader.get_tensor(checkpoint_var_name)
      print "tensor {} has shape {}, and its value has shape {}".format(
          checkpoint_var_name, var.get_shape(), var_value.shape)
      new_value = var_value.reshape(var.get_shape().as_list())  # as_list() gives plain ints for numpy
Project: deeplearning    Author: zxjzxj9
def train(self):

        s = tf.Session()

        init_fn = slim.assign_from_checkpoint_fn(
            "./vgg_19.ckpt",
            slim.get_variables_to_restore(exclude=['generate_image']))
        #optimizer = tf.train.AdamOptimizer(learning_rate = 1e-1, beta1 = 0.5, beta2 = 0.5).minimize(self.loss, var_list = [self.target])
        optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, options={'maxiter': 1000}, var_list = [self.target])

        s.run(tf.global_variables_initializer())
        init_fn(s)

        #for i in range(10000):
        #    _, loss_out = s.run([optimizer, self.loss])
        #    print("Current loss is: %.3f" %loss_out, end="\r")
        #print("")

        optimizer.minimize(s)
        loss_out = s.run(self.loss)
        print("Final loss: %.3f" %loss_out)

        plt.imshow(np.clip(s.run(self.target), 0, 255).astype(np.uint8))
        plt.show()
Project: deep_sort    Author: nwojke
def _create_image_encoder(preprocess_fn, factory_fn, image_shape, batch_size=32,
                         session=None, checkpoint_path=None,
                         loss_mode="cosine"):
    image_var = tf.placeholder(tf.uint8, (None, ) + image_shape)

    preprocessed_image_var = tf.map_fn(
        lambda x: preprocess_fn(x, is_training=False),
        tf.cast(image_var, tf.float32))

    l2_normalize = loss_mode == "cosine"
    feature_var, _ = factory_fn(
        preprocessed_image_var, l2_normalize=l2_normalize, reuse=None)
    feature_dim = feature_var.get_shape().as_list()[-1]

    if session is None:
        session = tf.Session()
    if checkpoint_path is not None:
        slim.get_or_create_global_step()
        init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
            checkpoint_path, slim.get_variables_to_restore())
        session.run(init_assign_op, feed_dict=init_feed_dict)

    def encoder(data_x):
        out = np.zeros((len(data_x), feature_dim), np.float32)
        _run_in_batches(
            lambda x: session.run(feature_var, feed_dict=x),
            {image_var: data_x}, out, batch_size)
        return out

    return encoder
Project: deeplearning    Author: zxjzxj9
def __init__(self, content, style, content_names, style_names):
        """
            Suppose the content and style is a numpy array,
        """

        self.content_names = content_names
        self.style_names = style_names
        self.VGG_MEAN = [123.68, 116.78, 103.94]

        tf.reset_default_graph()
        content = tf.constant(content) - tf.reshape(tf.constant(self.VGG_MEAN), [1, 1, 3])
        _, self.content_layers = nets.vgg.vgg_19(tf.expand_dims(content, axis = 0), is_training = False, spatial_squeeze = False)

        layer_name, layer_value = zip(*filter(lambda x: x[0] in content_names,  self.content_layers.items()))
        init_fn = slim.assign_from_checkpoint_fn("./vgg_19.ckpt", slim.get_variables_to_restore())
        with tf.Session() as s, tf.device("/device:XLA_CPU:0"):
            init_fn(s)
            layer_value = s.run(layer_value)

        self.content_map = dict(zip(layer_name, layer_value))
        #print(content_map)

        tf.reset_default_graph()
        style = tf.constant(style) - tf.reshape(tf.constant(self.VGG_MEAN), [1, 1, 3])
        _, self.style_layers = nets.vgg.vgg_19(tf.expand_dims(style, axis = 0), is_training = False, spatial_squeeze =  False)
        layer_name, layer_value = zip(*filter(lambda x: x[0] in style_names,  self.style_layers.items()))
        init_fn = slim.assign_from_checkpoint_fn("./vgg_19.ckpt", slim.get_variables_to_restore())

        with tf.Session() as s, tf.device("/device:XLA_CPU:0"):
            init_fn(s)
            layer_value = s.run(layer_value)

        self.style_map = dict(zip(layer_name, layer_value))
        #print(content_map)

        tf.reset_default_graph()
        self.target = tf.Variable(np.random.randint(0, 256, content.shape), dtype = tf.float32, name = "generate_image")
        self._build_graph()
Project: SSD_tensorflow_VOC    Author: LevinJ
def use_fined_model(self):
        image_size = inception.inception_v4.default_image_size
        batch_size = 3
        flowers_data_dir = "../../data/flower"
        train_dir = '/tmp/inception_finetuned/'

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)

            dataset = flowers.get_split('train', flowers_data_dir)
            images, images_raw, labels = self.load_batch(dataset, height=image_size, width=image_size)

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)

            probabilities = tf.nn.softmax(logits)

            checkpoint_path = tf.train.latest_checkpoint(train_dir)
            init_fn = slim.assign_from_checkpoint_fn(
              checkpoint_path,
              slim.get_variables_to_restore())

            with tf.Session() as sess:
                with slim.queues.QueueRunners(sess):
                    sess.run(tf.local_variables_initializer())  # initialize_local_variables() is deprecated
                    init_fn(sess)
                    np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])

                    for i in range(batch_size): 
                        image = np_images_raw[i, :, :, :]
                        true_label = np_labels[i]
                        predicted_label = np.argmax(np_probabilities[i, :])
                        predicted_name = dataset.labels_to_names[predicted_label]
                        true_name = dataset.labels_to_names[true_label]

                        plt.figure()
                        plt.imshow(image.astype(np.uint8))
                        plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                        plt.axis('off')
                        plt.show()
                return
Project: yolo-tf    Author: ruiminshen
def main():
    model = config.get('config', 'model')
    cachedir = utils.get_cachedir(config)
    with open(os.path.join(cachedir, 'names'), 'r') as f:
        names = [line.strip() for line in f]
    width = config.getint(model, 'width')
    height = config.getint(model, 'height')
    yolo = importlib.import_module('model.' + model)
    cell_width, cell_height = utils.calc_cell_width_height(config, width, height)
    tf.logging.info('(width, height)=(%d, %d), (cell_width, cell_height)=(%d, %d)' % (width, height, cell_width, cell_height))
    with tf.Session() as sess:
        paths = [os.path.join(cachedir, profile + '.tfrecord') for profile in args.profile]
        num_examples = sum(sum(1 for _ in tf.python_io.tf_record_iterator(path)) for path in paths)
        tf.logging.warn('num_examples=%d' % num_examples)
        image_rgb, labels = utils.data.load_image_labels(paths, len(names), width, height, cell_width, cell_height, config)
        image_std = tf.image.per_image_standardization(image_rgb)
        image_rgb = tf.cast(image_rgb, tf.uint8)
        ph_image = tf.placeholder(image_std.dtype, [1] + image_std.get_shape().as_list(), name='ph_image')
        global_step = tf.contrib.framework.get_or_create_global_step()
        builder = yolo.Builder(args, config)
        builder(ph_image)
        variables_to_restore = slim.get_variables_to_restore()
        ph_labels = [tf.placeholder(l.dtype, [1] + l.get_shape().as_list(), name='ph_' + l.op.name) for l in labels]
        with tf.name_scope('total_loss') as name:
            builder.create_objectives(ph_labels)
            total_loss = tf.losses.get_total_loss(name=name)
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        _image_rgb, _image_std, _labels = sess.run([image_rgb, image_std, labels])
        coord.request_stop()
        coord.join(threads)
        feed_dict = dict([(ph, np.expand_dims(d, 0)) for ph, d in zip(ph_labels, _labels)])
        feed_dict[ph_image] = np.expand_dims(_image_std, 0)
        logdir = utils.get_logdir(config)
        assert os.path.exists(logdir)
        model_path = tf.train.latest_checkpoint(logdir)
        tf.logging.info('load ' + model_path)
        slim.assign_from_checkpoint_fn(model_path, variables_to_restore)(sess)
        tf.logging.info('global_step=%d' % sess.run(global_step))
        tf.logging.info('total_loss=%f' % sess.run(total_loss, feed_dict))
        _ = Drawer(sess, names, builder.model.cell_width, builder.model.cell_height, _image_rgb, _labels, builder.model, feed_dict)
        plt.show()