Python tensorflow module: add_check_numerics_ops() code examples

The following 3 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.add_check_numerics_ops().
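
Before the project excerpts, a minimal self-contained sketch (TensorFlow 1.x graph mode, not taken from any of the projects below) of what the function does: it attaches a CheckNumerics assertion to every floating-point tensor in the default graph and returns one grouped op, so fetching that op makes a session run fail with InvalidArgumentError as soon as any tensor contains NaN or Inf.

import tensorflow as tf

# A toy graph with a non-finite intermediate value: log(0) evaluates to -inf.
x = tf.constant([1.0, 2.0])
y = tf.log(x - 1.0)

# Attach a CheckNumerics op to every float tensor in the default graph and
# return them grouped into a single op.
check_op = tf.add_check_numerics_ops()

with tf.Session() as sess:
    try:
        # Fetching the check op next to y reports the offending tensor by name.
        sess.run([y, check_op])
    except tf.errors.InvalidArgumentError as err:
        print("Non-finite value detected:", err.message)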

Project: divcolor    Author: aditya12agd5
def __build_graph(self):
        self.plhold_img, self.plhold_greylevel, self.plhold_latent, self.plhold_is_training, \
        self.plhold_keep_prob, self.plhold_kl_weight, self.plhold_lossweights \
          = self.model.inputs()

        # inference graph
        self.op_mean, self.op_stddev, self.op_vae, \
        self.op_mean_test, self.op_stddev_test, self.op_vae_test, \
        self.op_vae_condinference \
            = self.model.inference(self.plhold_img, self.plhold_greylevel, \
                self.plhold_latent, self.plhold_is_training, self.plhold_keep_prob)

        # loss function and gradient-descent step for the VAE
        self.loss = self.model.loss(self.plhold_img, self.op_vae, self.op_mean, \
            self.op_stddev, self.plhold_kl_weight, self.plhold_lossweights)
        self.train_step = self.model.optimize(self.loss, epsilon=1e-6)

        # standard ops: numerics check, variable init, checkpoint saver, summaries
        self.check_nan_op = tf.add_check_numerics_ops()
        self.init = tf.global_variables_initializer()
        self.saver = tf.train.Saver(max_to_keep=0)
        self.summary_op = tf.summary.merge_all()
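
The excerpt above only constructs the ops; the training loop that consumes check_nan_op is not shown. A hedged sketch of the corresponding step, assuming a session sess and a feed_dict built for the placeholders returned by self.model.inputs() (both assumptions, not part of the excerpt):

# Hypothetical training step: fetching check_nan_op together with train_step
# makes the run abort with InvalidArgumentError the moment any tensor in the
# VAE graph becomes NaN/Inf, instead of training on corrupted values.
_, _, batch_loss = sess.run(
    [self.check_nan_op, self.train_step, self.loss],
    feed_dict=feed_dict)
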
Project: keras-fcn    Author: JihongJu
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        self.check_num = tf.add_check_numerics_ops()
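
The fragment above is only the set_model hook of a Keras Callback; Keras invokes it once the model and its TensorFlow session exist, which is the earliest point at which the grouped check op can be built. A hedged sketch of how such a callback might be completed and registered (the class name CheckNumericsCallback, the on_batch_end hook and the fit call are assumptions, not from the keras-fcn source):

import tensorflow as tf
from keras import backend as K
from keras.callbacks import Callback

class CheckNumericsCallback(Callback):  # hypothetical name
    def set_model(self, model):
        # body as in the excerpt above
        self.model = model
        self.sess = K.get_session()
        self.check_num = tf.add_check_numerics_ops()

    def on_batch_end(self, batch, logs=None):
        # Evaluate every CheckNumerics op in the graph; raises
        # InvalidArgumentError if any tensor holds NaN/Inf. If the graph has
        # unfed placeholders, a feed_dict would also be needed here.
        self.sess.run(self.check_num)

# model.fit(x_train, y_train, callbacks=[CheckNumericsCallback()])
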
Project: TerpreT    Author: 51alg
def build_computation_graphs(self):
        self.model.declare_params(self.param_init_function)

        self.tf_nodes = {}
        to_build = {k: v for k, v in self.model_hypers_to_build_graph.items()
                        if k in self.data.get_hypers_names()}

        for model_hypers, build_graph in to_build.items():
            print("Construct forward graph... ", end="")

            forward_time_start = time.time()
            inputs, outputs = build_graph(self.model)
            loss, display_loss, output_placeholders, mask_placeholders, loss_nodes = \
                self.construct_loss(outputs)
            print ("done in %.2fs." % (time.time() - forward_time_start))

            optimizer = self.make_optimizer()

            gradient_time_start = time.time()
            print("Construct gradient graph... ", end="")
            grads_and_vars = self.compute_gradients(optimizer, loss)
            print("done in %.2fs." % (time.time() - gradient_time_start))

            gradient_apply_time_start = time.time()
            print("Construct apply gradient graph... ", end="")
            train_op = self.apply_update(optimizer, grads_and_vars)
            print("done in %.2fs." % (time.time() - gradient_apply_time_start))

            if self.do_debug:
                check_time_start = time.time()
                print("Construct check numerics graph... ", end="")
                # Group a CheckNumerics op over every float tensor built so far
                # and keep it so the training loop can fetch it in debug mode.
                self.check_ops.append(tf.add_check_numerics_ops())
                print("done in %.2fs." % (time.time() - check_time_start))

            if self.make_log:
                self.summary_nodes["train"] = tf.summary.scalar('train_loss', display_loss)
                self.summary_nodes["validate"] = tf.summary.scalar('validate_loss', display_loss)
                self.summary_nodes["params"] = []
                for p_name, p_node in self.model.params.items():
                    n_elements = p_node.get_shape()[0].value
                    for i in range(n_elements):
                        self.summary_nodes["params"].append(
                            tf.summary.scalar('%s/%i' % (p_name, i), p_node[i]))


            placeholders = {}
            placeholders.update(inputs)
            placeholders.update(output_placeholders)
            placeholders.update(mask_placeholders)
            self.tf_nodes[model_hypers] = {
                "inputs": inputs,
                "outputs": outputs,
                "placeholders": placeholders,
                "loss_nodes": loss_nodes,
                "loss": loss,
                "display_loss": display_loss,
                "grads_and_vars": grads_and_vars,
                "train_op": train_op
            }
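
The debug check ops collected above are only built here; the excerpt does not show them being run. A hedged sketch of a training step that would use them, assuming a session sess, a feed_dict, and one model_hypers entry of self.tf_nodes (all assumptions, not part of the TerpreT excerpt):

# Hypothetical training step for one model_hypers entry: in debug mode the
# grouped CheckNumerics ops are fetched alongside the train op, so any NaN/Inf
# aborts the step with InvalidArgumentError.
nodes = self.tf_nodes[model_hypers]
fetches = [nodes["train_op"], nodes["display_loss"]]
if self.do_debug:
    fetches.extend(self.check_ops)
results = sess.run(fetches, feed_dict=feed_dict)
display_loss_value = results[1]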