The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.get_default_session().
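Before the examples, here is a minimal sketch of the pattern they all rely on (written against the TF 1.x API; on TF 2.x the same calls live under tf.compat.v1, and the tensor name x below is purely illustrative): tf.get_default_session() returns the innermost session registered as the default, either by a `with tf.Session():` block or by `sess.as_default()`, and returns None when no session is current.

import tensorflow as tf  # TF 1.x; use `import tensorflow.compat.v1 as tf` on TF 2.x

x = tf.constant(42)

# No session has been made default yet, so there is nothing to fetch.
assert tf.get_default_session() is None

with tf.Session() as sess:
    # Inside the `with` block this session is the default, so helper code
    # (like the examples below) can reach it without being handed `sess`.
    assert tf.get_default_session() is sess
    print(tf.get_default_session().run(x))  # prints 42

# `as_default()` installs a default session without closing it on exit.
sess2 = tf.Session()
with sess2.as_default():
    assert tf.get_default_session() is sess2
sess2.close()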
def load_model(model):
    # Check if the model is a model directory (containing a metagraph and a checkpoint file)
    # or if it is a protobuf file with a frozen graph
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)

        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)

        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def __call__(self, *inputvals):
    assert len(inputvals) == len(self.nondata_inputs) + len(self.data_inputs)
    nondata_vals = inputvals[0:len(self.nondata_inputs)]
    data_vals = inputvals[len(self.nondata_inputs):]
    feed_dict = dict(zip(self.nondata_inputs, nondata_vals))
    n = data_vals[0].shape[0]
    for v in data_vals[1:]:
        assert v.shape[0] == n
    for i_start in range(0, n, self.batch_size):
        slice_vals = [v[i_start:min(i_start + self.batch_size, n)] for v in data_vals]
        for (var, val) in zip(self.data_inputs, slice_vals):
            feed_dict[var] = val
        results = tf.get_default_session().run(self.outputs, feed_dict=feed_dict)
        if i_start == 0:
            sum_results = results
        else:
            for i in range(len(results)):
                sum_results[i] = sum_results[i] + results[i]
    for i in range(len(results)):
        sum_results[i] = sum_results[i] / n
    return sum_results

# ================================================================
# Modules
# ================================================================
def test_create_optimizer(self):
    """Test that create_optimizer works with tf optimizers."""
    optimizer_config = {'learning_rate': 0.1}

    # test missing required entry `class`
    self.assertRaises(AssertionError, create_optimizer, optimizer_config)

    optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'
    with tf.Session().as_default():
        # test if the optimizer is created correctly
        optimizer = create_optimizer(optimizer_config)
        self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

        # test if the learning_rate variable is created with the correct value
        lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
        tf.get_default_session().run(tf.global_variables_initializer())
        self.assertAlmostEqual(lr_tensor.eval(), 0.1)

    optimizer_config2 = {'learning_rate': 0.1,
                         'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

    # test missing required argument (momentum in this case)
    with tf.Graph().as_default():
        self.assertRaises(TypeError, create_optimizer, optimizer_config2)
def input_pipeline(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
    # local variables must be initialized when num_epochs is not None,
    # otherwise running the queue raises an uninitialized-variable error
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, img_mask_batch, loss_mask_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, img_mask_batch, loss_mask_batch
def input_pipeline_dis(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
    # local variables must be initialized when num_epochs is not None,
    # otherwise running the queue raises an uninitialized-variable error
    tf.get_default_session().run(tf.local_variables_initializer())
    example_list = [read_my_file_format_dis(filename_queue, is_training)
                    for _ in range(read_threads)]
    min_after_dequeue = 300 if is_training else 10
    capacity = min_after_dequeue + 3 * batch_size
    clip_batch, label_batch, text_batch = tf.train.shuffle_batch_join(
        example_list, batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return clip_batch, label_batch, text_batch
def step(self, data, update_model=True, align=False, use_sgd=False, **kwargs):
    if update_model:
        self.dropout_on.run()
    else:
        self.dropout_off.run()

    encoder_inputs, targets, input_length = self.get_batch(data)

    input_feed = {self.targets: targets}
    for i in range(len(self.encoders)):
        input_feed[self.encoder_inputs[i]] = encoder_inputs[i]
        input_feed[self.encoder_input_length[i]] = input_length[i]

    output_feed = {'loss': self.xent_loss}
    if update_model:
        output_feed['update'] = self.update_ops.xent[1] if use_sgd else self.update_ops.xent[0]
    if align:
        output_feed['weights'] = self.attention_weights

    res = tf.get_default_session().run(output_feed, input_feed)
    return namedtuple('output', 'loss weights')(res['loss'], res.get('weights'))
def set_param_values(self, flattened_params, **tags):
    debug = tags.pop("debug", False)
    param_values = unflatten_tensors(
        flattened_params, self.get_param_shapes(**tags))
    ops = []
    feed_dict = dict()
    for param, dtype, value in zip(
            self.get_params(**tags),
            self.get_param_dtypes(**tags),
            param_values):
        if param not in self._cached_assign_ops:
            assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype)
            assign_op = tf.assign(param, assign_placeholder)
            self._cached_assign_ops[param] = assign_op
            self._cached_assign_placeholders[param] = assign_placeholder
        ops.append(self._cached_assign_ops[param])
        feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
        if debug:
            print("setting value of %s" % param.name)
    tf.get_default_session().run(ops, feed_dict=feed_dict)
def predict_sym(self, xs):
    return L.get_output(self.l_out, xs)

# def fit(self, xs, ys):
#     if self._normalize_inputs:
#         # recompute normalizing constants for inputs
#         new_mean = np.mean(xs, axis=0, keepdims=True)
#         new_std = np.std(xs, axis=0, keepdims=True) + 1e-8
#         tf.get_default_session().run(tf.group(
#             tf.assign(self._x_mean_var, new_mean),
#             tf.assign(self._x_std_var, new_std),
#         ))
#     inputs = [xs, ys]
#     loss_before = self._optimizer.loss(inputs)
#     if self._name:
#         prefix = self._name + "_"
#     else:
#         prefix = ""
#     logger.record_tabular(prefix + 'LossBefore', loss_before)
#     self._optimizer.optimize(inputs)
#     loss_after = self._optimizer.loss(inputs)
#     logger.record_tabular(prefix + 'LossAfter', loss_after)
#     logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)
def test_argmax_and_embed():
    """Ensure argmax_and_embed works without projection"""
    embedding = tf.get_variable('embedding', [3, 20])
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]]))
    loop_fn = helpers.argmax_and_embed(embedding, output_projection=None)
    correct = tf.nn.embedding_lookup(embedding, [1])
    result = loop_fn(data, 0)

    # get ready to see if it's right
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, b = sess.run([result, correct])

    assert np.all(a == b)
def test_sample_and_embed():
    """Ensure sample_and_embed works without projection"""
    embedding = tf.get_variable('embedding', [3, 20])
    data = tf.get_variable('input', initializer=np.array([[1., 2., 1.]]))
    loop_fn = helpers.sample_and_embed(embedding, 1., output_projection=None)
    result = loop_fn(data, 0)

    # get ready to see if it does indeed pick out one item
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embed_mat = sess.run([result, embedding])

    found = False
    for row in embed_mat:
        if np.all(row == a):
            found = True
    assert found
def test_argmax_and_embed_with_projection():
    """Ensure argmax_and_embed works with projection"""
    embedding = tf.get_variable('embedding', [10, 11])
    proj = (tf.get_variable('weights', [3, 10]),
            tf.get_variable('biases', [10]))
    data = tf.get_variable('input',
                           initializer=np.array([[1., 2., 1.]], dtype=np.float32))
    loop_fn = helpers.argmax_and_embed(embedding, output_projection=proj)

    # we don't know what the correct answer is now because it's randomly
    # projected, so let's get what we need to do it by hand
    correct_projection = tf.nn.bias_add(tf.matmul(data, proj[0]), proj[1])
    result = loop_fn(data, 0)

    # get ready to see if it's right
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embedding, projection = sess.run(
        [result, embedding, correct_projection])
    argmax_p = np.argmax(projection)

    assert np.all(embedding[argmax_p] == a)
def test_sample_and_embed_with_projection():
    """Ensure sample_and_embed works with projection"""
    embedding = tf.get_variable('embedding', [10, 11])
    proj = (tf.get_variable('weights', [3, 10]),
            tf.get_variable('biases', [10]))
    data = tf.get_variable('input',
                           initializer=np.array([[1., 2., 1.]], dtype=np.float32))
    loop_fn = helpers.sample_and_embed(embedding, 1., output_projection=proj)
    result = loop_fn(data, 0)

    # get ready to see if it does indeed pick out one item
    sess = tf.get_default_session()
    sess.run(tf.initialize_all_variables())

    a, embed_mat = sess.run([result, embedding])

    found = False
    for row in embed_mat:
        if np.all(row == a):
            found = True
    assert found
def set_value(x, value):
    """Sets the value of a variable, from a Numpy array.

    # Arguments
        x: Tensor to set to a new value.
        value: Value to set the tensor to, as a Numpy array
            (of the same shape).
    """
    value = np.asarray(value)
    tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
    if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
    else:
        assign_placeholder = tf.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
    session = tf.get_default_session()
    session.run(assign_op, feed_dict={assign_placeholder: value})
def update_weights_and_calc_values_temp(self, d_w_i_j, layer_to_perturbe, i, j, X):
    """Update the weights of the given layer, calculate the output,
    and restore the weights to their original values"""
    if layer_to_perturbe + 1 >= len(self.hidden_layers):
        scope_name = 'softmax_linear'
    else:
        scope_name = "hidden" + str(layer_to_perturbe)
    weights = get_scope_variable(scope_name, "weights", shape=None, initializer=None)
    session = tf.get_default_session()
    weights_values = weights.eval(session=session)
    # copy before perturbing, so the restore below uses the unperturbed values
    weights_values_pert = weights_values.copy()
    weights_values_pert[i, j] += d_w_i_j
    set_value(weights, weights_values_pert)
    feed_dict = {self.x: X}
    layer_values = session.run(self.hidden_layers[layer_to_perturbe], feed_dict=feed_dict)
    set_value(weights, weights_values)
    return layer_values
def get_value(var, session=None):
    """ get_value.

    Get a variable's value. If no session provided, use default one.

    Arguments:
        var: `Variable`. The variable to get value from.
        session: `Session`. The session to run the op. Default: the default session.

    Returns:
        The variable's value.
    """
    if not session:
        session = tf.get_default_session()
    return var.eval(session)
def set_value(var, value, session=None):
    """ set_value.

    Set a variable's value. If no session provided, use default one.

    Arguments:
        var: `Variable`. The variable to assign a value.
        value: The value to assign. Must be compatible with variable dtype.
        session: `Session`. The session to perform the assignation.
            Default: the default session.
    """
    op = tf.assign(var, value=value)
    if not session:
        session = tf.get_default_session()
    return op.eval(session=session)
def set_param_values(self, flattened_params, sess=None, **tags):
    debug = tags.pop("debug", False)
    param_values = unflatten_tensors(
        flattened_params, self.get_param_shapes(**tags))
    ops = []
    feed_dict = dict()
    for param, dtype, value in zip(
            self.get_params(**tags),
            self.get_param_dtypes(**tags),
            param_values):
        if param not in self._cached_assign_ops:
            assign_placeholder = tf.placeholder(dtype=param.dtype.base_dtype,
                                                shape=param.get_shape())
            assign_op = tf.assign(param, assign_placeholder)
            self._cached_assign_ops[param] = assign_op
            self._cached_assign_placeholders[param] = assign_placeholder
        ops.append(self._cached_assign_ops[param])
        feed_dict[self._cached_assign_placeholders[param]] = value.astype(dtype)
        if debug:
            print("setting value of %s" % param.name)
    tf.get_default_session().run(ops, feed_dict=feed_dict)
def save(self, checkpoint_dir=None):
    if checkpoint_dir is None:
        checkpoint_dir = logger.get_snapshot_dir()

    pool_file = os.path.join(checkpoint_dir, 'pool.chk')
    if self.save_format == 'pickle':
        pickle_dump(pool_file + '.tmp', self.pool)
    elif self.save_format == 'joblib':
        joblib.dump(self.pool, pool_file + '.tmp', compress=1, cache_size=1e9)
    else:
        raise NotImplementedError
    shutil.move(pool_file + '.tmp', pool_file)

    checkpoint_file = os.path.join(checkpoint_dir, 'params.chk')
    sess = tf.get_default_session()
    saver = tf.train.Saver()
    saver.save(sess, checkpoint_file)

    tabular_file = os.path.join(checkpoint_dir, 'progress.csv')
    if os.path.isfile(tabular_file):
        tabular_chk_file = os.path.join(checkpoint_dir, 'progress.csv.chk')
        shutil.copy(tabular_file, tabular_chk_file)

    logger.log('Saved to checkpoint %s' % checkpoint_file)
def restore(self, checkpoint_dir=None):
    if checkpoint_dir is None:
        checkpoint_dir = logger.get_snapshot_dir()
    checkpoint_file = os.path.join(checkpoint_dir, 'params.chk')
    if os.path.isfile(checkpoint_file + '.meta'):
        sess = tf.get_default_session()
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_file)

        tabular_chk_file = os.path.join(checkpoint_dir, 'progress.csv.chk')
        if os.path.isfile(tabular_chk_file):
            tabular_file = os.path.join(checkpoint_dir, 'progress.csv')
            logger.remove_tabular_output(tabular_file)
            shutil.copy(tabular_chk_file, tabular_file)
            logger.add_tabular_output(tabular_file)

        pool_file = os.path.join(checkpoint_dir, 'pool.chk')
        if self.save_format == 'pickle':
            # assign the loaded pool, mirroring the joblib branch below
            self.pool = pickle_load(pool_file)
        elif self.save_format == 'joblib':
            self.pool = joblib.load(pool_file)
        else:
            raise NotImplementedError

        logger.log('Restored from checkpoint %s' % checkpoint_file)
    else:
        logger.log('No checkpoint %s' % checkpoint_file)
def save(self, checkpoint_dir=None):
    if checkpoint_dir is None:
        checkpoint_dir = logger.get_snapshot_dir()

    if self.qf is not None:
        pool_file = os.path.join(checkpoint_dir, 'pool.chk')
        if self.save_format == 'pickle':
            pickle_dump(pool_file + '.tmp', self.pool)
        elif self.save_format == 'joblib':
            joblib.dump(self.pool, pool_file + '.tmp', compress=1, cache_size=1e9)
        else:
            raise NotImplementedError
        shutil.move(pool_file + '.tmp', pool_file)

    checkpoint_file = os.path.join(checkpoint_dir, 'params.chk')
    sess = tf.get_default_session()
    saver = tf.train.Saver()
    saver.save(sess, checkpoint_file)

    tabular_file = os.path.join(checkpoint_dir, 'progress.csv')
    if os.path.isfile(tabular_file):
        tabular_chk_file = os.path.join(checkpoint_dir, 'progress.csv.chk')
        shutil.copy(tabular_file, tabular_chk_file)

    logger.log('Saved to checkpoint %s' % checkpoint_file)
def map(self, state):
    """Compute output in session.

    Make sure a default session is set when calling.
    """
    state = state.flatten()
    assert self.state_space.contains(state)
    if self.sess is None:
        sess = tf.get_default_session()
    else:
        sess = self.sess
    mean, var = sess.run([self.a_pred, self.var], {self.X: [state]})
    action = np.array(normal(mean, var))
    action = action.reshape(self.action_space.shape)
    return action
def action_given(self, state, add_noise=False):
    # feed explicitly provided state
    actions = tf.get_default_session().run(self.output_action,
                                           feed_dict={self.input_state: [state],
                                                      base_network.IS_TRAINING: False})
    # NOTE: noise is added _outside_ the tf graph. we do this simply because the noisy
    # output is never used for any part of the computation graph required for online
    # training; it's only used during training after passing through the replay buffer.
    if add_noise:
        if VERBOSE_DEBUG:
            pre_noise = str(actions)
        actions[0] += self.exploration_noise.sample()
        # np.clip takes (array, min, max); action output is _always_ (-1, 1)
        actions = np.clip(actions, -1, 1)
        if VERBOSE_DEBUG:
            print("TRAIN action_given pre_noise %s post_noise %s" % (pre_noise, actions))
    return actions
def train(self, observations, actions, advantages):
    """Take one training step given observations, actions and subsequent advantages."""
    if VERBOSE_DEBUG:
        print("TRAIN")
        print("observations", np.stack(observations))
        print("actions", actions)
        print("advantages", advantages)
    _, loss = tf.get_default_session().run([self.train_op, self.loss],
                                           feed_dict={self.observations: observations,
                                                      self.actions: actions,
                                                      self.advantages: advantages})
    return float(loss)
def __init__(self, name, optimizee=None, n_bptt_steps=None, lr=1e-4,
             use_avg_loss=False, is_training=True, optimizer_name='adam', **kwargs):
    self.name = name
    self.is_training = is_training
    self.kwargs = kwargs

    if self.is_training:
        self.optimizee = optimizee
        self.optimizer_name = optimizer_name
        self.x_dim = optimizee.get_x_dim()
        self.f = optimizee.loss
        self.n_bptt_steps = n_bptt_steps
        self.train_lr = lr
        self.use_avg_loss = use_avg_loss
    else:
        self.x_dim = 233

    self.session = tf.get_default_session()
    self._build()
    self.bid = 0
def initialize(self, session=None):
    """
    Helper for initializing all the variables. Builds and runs model variables and
    global step initializers. Note that dual variables are initialized only when
    calling `backward`.

    :param session: optional tensorflow session (if None default session is used)

    :return: None
    """
    ss = session or tf.get_default_session()
    assert ss, 'No default tensorflow session!'
    if isinstance(self.w, MergedVariable):
        self.w.initialize(session=session)
    else:
        ss.run(tf.variables_initializer([self.w]))
    ss.run(tf.variables_initializer(self.hyper_gradient_vars + [self.global_step.var]))
def initialize(self, session=None):
    """
    Helper for initializing all the variables. Builds and runs model variables,
    Zs and global step initializers.

    :param session: optional tensorflow session (if None default session is used)

    :return: None
    """
    ss = session or tf.get_default_session()
    assert ss, 'No default tensorflow session!'
    if isinstance(self.w, MergedVariable):
        self.w.initialize(session=session)
    else:
        ss.run(tf.variables_initializer([self.w]))  # never tested
    ss.run(tf.variables_initializer(self.hyper_gradient_vars + [self.global_step.var]))
    [z.initializer().run() for z in self.zs]
    return True
def run_all(self, T, train_feed_dict_supplier=None, val_feed_dict_suppliers=None,
            hyper_batch_step=None, forward_su=None, after_forward_su=None):
    """
    Helper method for running

    :param hyper_batch_step: support for stochastic sampling of validation set
    :param T:
    :param train_feed_dict_supplier:
    :param val_feed_dict_suppliers:
    :param forward_su:
    :param after_forward_su:
    :return:
    """
    # self.initialize()
    for k in range(T):
        self.step_forward(train_feed_dict_supplier=train_feed_dict_supplier,
                          summary_utils=forward_su)

    if after_forward_su:
        after_forward_su.run(tf.get_default_session(), T)

    return self.hyper_gradients(val_feed_dict_suppliers, hyper_batch_step)
def run(self, T, train_feed_dict_supplier=None, val_feed_dict_suppliers=None,
        hyper_constraints_ops=None, _debug_no_hyper_update=False):  # TODO add session parameter
    """
    :param _debug_no_hyper_update:
    :param T: number of steps
    :param train_feed_dict_supplier:
    :param val_feed_dict_suppliers:
    :param hyper_constraints_ops: (list of) either callable (no parameters) or tensorflow ops
    :return:
    """
    # idea: if steps == T then do full reverse, or forward, otherwise do trho and rtho
    # after all the main difference is that if we go with the full version, after the
    # gradient has been computed, the method `initialize()` is called.
    self.hyper_gradients.run_all(T, train_feed_dict_supplier=train_feed_dict_supplier,
                                 val_feed_dict_suppliers=val_feed_dict_suppliers,
                                 hyper_batch_step=self.hyper_batch_step.eval())
    if not _debug_no_hyper_update:
        [tf.get_default_session().run(hod.assign_ops) for hod in self.hyper_optimizers]
        if hyper_constraints_ops:
            [op() if callable(op) else op.eval() for op in as_list(hyper_constraints_ops)]
        self.hyper_batch_step.increase.eval()
def main(args):
    if args.meta_file is None or not os.path.exists(args.meta_file):
        print("Invalid tensorflow meta-graph file:", args.meta_file)
        return

    gpu_options = tf.GPUOptions(allow_growth=True)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                            log_device_placement=False,
                                            allow_soft_placement=True))
    with sess.as_default():
        # ---- load pretrained parameters ---- #
        saver = tf.train.import_meta_graph(args.meta_file, clear_devices=True)
        saver.restore(tf.get_default_session(), args.ckpt_file)

        pretrained = {}
        var_ = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)
        print("total:", len(var_))
        for v in var_:
            print("process:", v.name)
            # notice: a parameter name looks like 'Resnet/conv2d/bias:0';
            # here we should remove the prefix and keep '/conv2d/bias:0'
            v_name = v.name
            pretrained[v_name] = sess.run([v])
        np.save(args.save_path, pretrained)
        print("done:", len(pretrained.keys()))
def next_batch(self, batch_size=1, sess=None):
    with tf.name_scope('Batch_getter') as scope:
        if sess is None:
            self.sess = tf.get_default_session()
        else:
            self.sess = sess
        images, seqs, masks = self.sess.run(
            [self.images, self.complete_seq, self.complete_mask],
            feed_dict={self.batch_size: batch_size})
        '''
        print(seqs)
        print(str(seqs[0])[0])
        process_seqs = []
        process_masks = []
        for index, c in enumerate(seqs):
            process_seqs.extend([c.decode()])
            process_masks.extend([masks[index].decode()])
        '''
        print(seqs)
        c_seqs = self.finalize_seq(seqs)
        c_masks = self.finalize_seq(masks)
        print(c_seqs)
        input_seqs = self.make_input_seq(c_seqs)
        output_seqs = self.make_output_seq(c_seqs)
        final_masks = self.make_input_seq(c_masks)
    return images, input_seqs, output_seqs, final_masks
def test_variable_by_name(self):
    with self.test_context():
        name = 'variable'
        variable = tf.get_variable(name, shape=())
        self.assertTrue(gpflow.misc.is_initializable_tensor(variable))

        def equal(found):
            self.assertTrue(gpflow.misc.is_initializable_tensor(found))
            self.assertEqual(found, variable)

        def not_equal(found):
            self.assertEqual(found, None)

        fn = gpflow.misc.get_variable_by_name
        graph = tf.Graph()
        session = tf.get_default_session()
        fake_name = "foo"

        equal(fn(name))
        equal(fn(name, graph=session.graph))
        not_equal(fn(name, graph=graph))
        not_equal(fn(fake_name))
        not_equal(fn(fake_name, graph=graph))
def action_given(self, state, add_noise):
    # NOTE: noise is added _outside_ the tf graph. we do this simply because the noisy
    # output is never used for any part of the computation graph required for online
    # training; it's only used during training after passing through the replay buffer.
    actions = tf.get_default_session().run(self.output_action,
                                           feed_dict={self.input_state: [state],
                                                      base_network.IS_TRAINING: False,
                                                      base_network.FLIP_HORIZONTALLY: False})
    if add_noise:
        if VERBOSE_DEBUG:
            pre_noise = str(actions)
        actions[0] += self.exploration_noise.sample()
        # np.clip takes (array, min, max); action output is _always_ (-1, 1)
        actions = np.clip(actions, -1, 1)
        if VERBOSE_DEBUG:
            print("TRAIN action_given pre_noise %s post_noise %s" % (pre_noise, actions))
    return list(map(float, np.squeeze(actions)))
def _run_monitor(self, monitor, num_epochs=3, num_steps_per_epoch=10, pass_max_steps=True):
    if pass_max_steps:
        max_steps = num_epochs * num_steps_per_epoch - 1
    else:
        max_steps = None
    monitor.begin(max_steps=max_steps)
    for epoch in xrange(num_epochs):
        monitor.epoch_begin(epoch)
        should_stop = False
        step = epoch * num_steps_per_epoch
        next_epoch_step = step + num_steps_per_epoch
        while (not should_stop) and (step < next_epoch_step):
            tensors = monitor.step_begin(step)
            output = tf.get_default_session().run(tensors) if tensors else {}
            output = dict(zip(
                [t.name if isinstance(t, tf.Tensor) else t for t in tensors],
                output))
            should_stop = monitor.step_end(step=step, output=output)
            monitor.post_step(step=step, session=None)
            step += 1
        monitor.epoch_end(epoch)
    monitor.end()
def verify_image_jpeg(imagepath, imageshape):
    scope = inspect.stack()[0][3]
    try:
        graph = tf.get_default_graph()
        path = graph.get_tensor_by_name(scope + '/path:0')
        decode = graph.get_tensor_by_name(scope + '/decode_jpeg:0')
    except KeyError:
        tf.logging.debug('creating decode_jpeg tensor')
        path = tf.placeholder(tf.string, name=scope + '/path')
        imagefile = tf.read_file(path, name=scope + '/read_file')
        decode = tf.image.decode_jpeg(imagefile, channels=3, name=scope + '/decode_jpeg')
    try:
        image = tf.get_default_session().run(decode, {path: imagepath})
    except:
        return False
    return np.all(np.equal(image.shape[:2], imageshape[:2]))
def test(self, model, cases):
    sess = tf.get_default_session()
    guarantee_initialized_variables(sess)

    embeds = model.compute(model.embeds, cases)
    primitive_embeddings = RLongPrimitiveEmbeddings(6)

    # compute object embedding after applying projection
    object_projection_layer = model._object_projection_layer
    W, b = object_projection_layer.get_weights()  # shapes [10, 6] and [6]
    object_embed = np.ones(10).dot(W) + b

    assert_array_almost_equal(embeds[0],
                              np.concatenate((np.zeros(6),
                                              primitive_embeddings['r'],
                                              primitive_embeddings[-1])))
    assert_array_almost_equal(embeds[1],
                              np.concatenate((np.zeros(6),
                                              np.zeros(6),
                                              primitive_embeddings['X1/1'])))
    assert_array_almost_equal(embeds[2],
                              np.concatenate((primitive_embeddings['b'],
                                              object_embed,
                                              object_embed)))
def save_checkpoint(self, checkpoint_name):
    tf.get_collection_ref("threshold")[:] = [float(self.threshold)]
    tf.get_collection_ref("features")[:] = self.features.values()
    tf.get_collection_ref("loss")[:] = [self.loss]
    tf.get_collection_ref("prediction")[:] = [self.prediction]
    os.makedirs(os.path.dirname(checkpoint_name), exist_ok=True)
    saver = tf.train.Saver()
    saver.save(tf.get_default_session(), checkpoint_name)
    with open(os.path.join(os.path.dirname(checkpoint_name), "hparams.txt"), "w") as f:
        f.write(repr(self.hparams.__dict__))
def predict_proba_with_loss(self, X, y):
    feed_dict = {}
    feed_dict[self.labels] = y
    for key, tensor in self.features.items():
        feed_dict[tensor] = X[key]
    prediction, loss = tf.get_default_session().run(
        [self.prediction, self.loss], feed_dict=feed_dict)
    return np.reshape(prediction, [-1]), loss
def act(self, ob, c, h):
    sess = tf.get_default_session()
    return sess.run([self.sample, self.vf] + self.state_out,
                    {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})
def value(self, ob, c, h):
    sess = tf.get_default_session()
    return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})[0]
def act(self, ob):
    sess = tf.get_default_session()
    return sess.run([self.sample, self.vf], {self.x: [ob]})