Python tensorflow module: global_variables_initializer() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.global_variables_initializer().
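As a quick orientation before the project excerpts: a minimal sketch of the typical pattern, assuming TensorFlow 1.x (the variable names here are illustrative only). tf.global_variables_initializer() returns an op that assigns every global variable its initial value; nothing is initialized until that op is run in a session.

    import tensorflow as tf

    # Two global variables; they hold no value until the initializer op runs.
    w = tf.Variable(tf.random_normal([2, 2]), name='w')
    b = tf.Variable(tf.zeros([2]), name='b')

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)        # assigns w and b their initial values
        print(sess.run([w, b]))  # now safe to read the variables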

Project: How-to-Learn-from-Little-Data    Author: llSourcell    | project source | file source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # replace V[i, dim2[i]] with val[i] for each row i
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            v, d2, chg = elems  # Python 3 removed tuple parameter unpacking
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z
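A hedged usage sketch of update_tensor (shapes and values here are illustrative, not from the project): V is a [batch, n] matrix, dim2 gives one column index per row, and val gives the replacement values.

    import numpy as np
    import tensorflow as tf

    V = tf.Variable(np.arange(12).reshape((3, 4)), dtype=tf.float32)
    dim2 = tf.constant([0., 2., 3.])    # column to overwrite in each row
    val = tf.constant([-1., -1., -1.])  # replacement values

    updated = update_tensor(V, dim2, val)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(updated))  # each row of V with one entry replaced by -1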
Project: deep-summarization    Author: harpribot    | project source | file source
def _start_session(self):
        """
        Starts the TensorFlow session.

        :return: None
        """
        self.sess.run(tf.global_variables_initializer())
        # initialize the saver node
        # print tf.GraphKeys.GLOBAL_VARIABLES
        self.saver = tf.train.Saver(tf.global_variables())
        # get the latest checkpoint
        last_checkpoint_path = self.checkpointer.get_last_checkpoint()
        if last_checkpoint_path is not None:
            print('Previously saved TensorFlow objects found... Extracting...')
            # restore the tensorflow variables
            self.saver.restore(self.sess, last_checkpoint_path)
            print('Extraction complete. Moving forward...')
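For context, a hedged sketch of the saving counterpart this restore logic depends on (the method name and path argument are illustrative placeholders, not the project's actual API; the project's checkpointer supplies the real path):

    def _save_session(self, checkpoint_path, step):
        """Saves all global variables so _start_session() can restore them later."""
        # checkpoint_path is a hypothetical stand-in for the checkpointer's path
        self.saver.save(self.sess, checkpoint_path, global_step=step)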
Project: human-rl    Author: gsastry    | project source | file source
def fit(self, X_train, y_train, X_valid, y_valid, X_test, y_test, steps=400):
        tf.global_variables_initializer().run()
        redirect=FDRedirector(STDERR)
        for i in range(steps):
            redirect.start()
            feed_dict = {self.labels:y_train}
            for key, tensor in self.features.items():
                feed_dict[tensor] = X_train[key]
            # the original excerpt referenced an undefined `sess`; reuse the
            # default session implied by tf.global_variables_initializer().run()
            predictions, loss = tf.get_default_session().run(
                [self.prediction, self.train_op], feed_dict=feed_dict)
            if i % 10 == 0:
                print("step:{} loss:{:.3g} np.std(predictions):{:.3g}".format(i, loss, np.std(predictions)))
                self.threshold = float(min(self.threshold_from_data(X_valid, y_valid), self.threshold_from_data(X_train, y_train)))
                tf.get_collection_ref("threshold")[0] = self.threshold
                self.print_metrics(X_train, y_train, "Training")
                self.print_metrics(X_valid, y_valid, "Validation")
            errors = redirect.stop()
            if errors:
                print(errors)
        self.print_metrics(X_test, y_test, "Test")
Project: tensorflow_qrnn    Author: icoxfog417    | project source | file source
def test_qrnn_linear_forward(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_linear:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
Project: tensorflow_qrnn    Author: icoxfog417    | project source | file source
def test_qrnn_with_previous(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_with_previous:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
Project: tensorflow_qrnn    Author: icoxfog417    | project source | file source
def test_qrnn_convolution(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_conv:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=3)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape)
Project: seq2seq    Author: google    | project source | file source
def test_encode(self):
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = rnn_encoder.UnidirectionalRNNEncoder(self.params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    np.testing.assert_array_equal(encoder_output_.outputs.shape,
                                  [self.batch_size, self.sequence_length, 32])
    self.assertIsInstance(encoder_output_.final_state,
                          tf.contrib.rnn.LSTMStateTuple)
    np.testing.assert_array_equal(encoder_output_.final_state.h.shape,
                                  [self.batch_size, 32])
    np.testing.assert_array_equal(encoder_output_.final_state.c.shape,
                                  [self.batch_size, 32])
Project: seq2seq    Author: google    | project source | file source
def _test_encode_with_params(self, params):
    """Tests the StackBidirectionalRNNEncoder with a specific cell"""
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = rnn_encoder.StackBidirectionalRNNEncoder(params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    output_size = encode_fn.params["rnn_cell"]["cell_params"]["num_units"]

    np.testing.assert_array_equal(
        encoder_output_.outputs.shape,
        [self.batch_size, self.sequence_length, output_size * 2])

    return encoder_output_
Project: seq2seq    Author: google    | project source | file source
def test_reading_without_targets(self):
    num_epochs = 50
    data_provider = make_parallel_data_provider(
        data_sources_source=[self.source_file.name],
        data_sources_target=None,
        num_epochs=num_epochs,
        shuffle=True)

    item_keys = list(data_provider.list_items())
    item_values = data_provider.get(item_keys)
    items_dict = dict(zip(item_keys, item_values))

    self.assertEqual(set(item_keys), set(["source_tokens", "source_len"]))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      with tf.contrib.slim.queues.QueueRunners(sess):
        item_dicts_ = [sess.run(items_dict) for _ in range(num_epochs * 3)]

    for item_dict in item_dicts_:
      self.assertEqual(item_dict["source_len"], 2)
      item_dict["source_tokens"] = np.char.decode(
          item_dict["source_tokens"].astype("S"), "utf-8")
      self.assertEqual(item_dict["source_tokens"][-1], "SEQUENCE_END")
Project: seq2seq    Author: google    | project source | file source
def test_with_fixed_inputs(self):
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    seq_length = tf.ones(self.batch_size, dtype=tf.int32) * self.sequence_length

    helper = decode_helper.TrainingHelper(
        inputs=inputs, sequence_length=seq_length)
    decoder_fn = self.create_decoder(
        helper=helper, mode=tf.contrib.learn.ModeKeys.TRAIN)
    initial_state = decoder_fn.cell.zero_state(
        self.batch_size, dtype=tf.float32)
    decoder_output, _ = decoder_fn(initial_state, helper)

    #pylint: disable=E1101
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      decoder_output_ = sess.run(decoder_output)

    np.testing.assert_array_equal(
        decoder_output_.logits.shape,
        [self.sequence_length, self.batch_size, self.vocab_size])
    np.testing.assert_array_equal(decoder_output_.predicted_ids.shape,
                                  [self.sequence_length, self.batch_size])

    return decoder_output_
Project: seq2seq    Author: google    | project source | file source
def test_with_dynamic_inputs(self):
    embeddings = tf.get_variable("W_embed", [self.vocab_size, self.input_depth])

    helper = decode_helper.GreedyEmbeddingHelper(
        embedding=embeddings, start_tokens=[0] * self.batch_size, end_token=-1)
    decoder_fn = self.create_decoder(
        helper=helper, mode=tf.contrib.learn.ModeKeys.INFER)
    initial_state = decoder_fn.cell.zero_state(
        self.batch_size, dtype=tf.float32)
    decoder_output, _ = decoder_fn(initial_state, helper)

    #pylint: disable=E1101
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      decoder_output_ = sess.run(decoder_output)

    np.testing.assert_array_equal(
        decoder_output_.logits.shape,
        [self.max_decode_length, self.batch_size, self.vocab_size])
    np.testing.assert_array_equal(decoder_output_.predicted_ids.shape,
                                  [self.max_decode_length, self.batch_size])
Project: seq2seq    Author: google    | project source | file source
def _test_with_residuals(self, inputs, **kwargs):
    """Runs the cell in a session"""
    inputs = tf.convert_to_tensor(inputs)
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      test_cell = rnn_cell.ExtendedMultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)],
          residual_connections=True,
          **kwargs)
      res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      return sess.run(res_test)
Project: seq2seq    Author: google    | project source | file source
def _test_with_params(self, params):
    """Tests the encoder with a given parameter configuration"""
    inputs = tf.random_normal(
        [self.batch_size, self.sequence_length, self.input_depth])
    example_length = tf.ones(
        self.batch_size, dtype=tf.int32) * self.sequence_length

    encode_fn = PoolingEncoder(params, self.mode)
    encoder_output = encode_fn(inputs, example_length)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      encoder_output_ = sess.run(encoder_output)

    np.testing.assert_array_equal(
        encoder_output_.outputs.shape,
        [self.batch_size, self.sequence_length, self.input_depth])
    np.testing.assert_array_equal(
        encoder_output_.attention_values.shape,
        [self.batch_size, self.sequence_length, self.input_depth])
    np.testing.assert_array_equal(encoder_output_.final_state.shape,
                                  [self.batch_size, self.input_depth])
Project: yolo_tensorflow    Author: hizhangp    | project source | file source
def __init__(self, net, weight_file):
        self.net = net
        self.weights_file = weight_file

        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        self.threshold = cfg.THRESHOLD
        self.iou_threshold = cfg.IOU_THRESHOLD
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        print('Restoring weights from: ' + self.weights_file)
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weights_file)
Project: cxflow-tensorflow    Author: Cognexa    | project source | file source
def test_create_optimizer(self):
        """Test if create optimizer does work with tf optimizers."""

        optimizer_config = {'learning_rate': 0.1}

        # test missing required entry `class`
        self.assertRaises(AssertionError, create_optimizer, optimizer_config)

        optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'

        with tf.Session().as_default():
            # test if the optimizer is created correctly
            optimizer = create_optimizer(optimizer_config)
            self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

            # test if learning_rate variable is created with the correct value
            lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
            tf.get_default_session().run(tf.global_variables_initializer())
            self.assertAlmostEqual(lr_tensor.eval(), 0.1)

        optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

        # test missing required argument (momentum in this case)
        with tf.Graph().as_default():
            self.assertRaises(TypeError, create_optimizer, optimizer_config2)
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def predict(self, model_path, x_test):
        """
        Uses the model to create a prediction for the given data

        :param model_path: path to the model checkpoint to restore
        :param x_test: Data to predict on. Shape [n, nx, ny, channels]
        :returns prediction: the U-Net prediction, shape [n, px, py, labels] (px = nx - self.offset/2)
        """

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)

            # Restore model weights from previously saved model
            self.restore(sess, model_path)

            y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
            prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})

        return prediction
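A hedged usage sketch of predict (the class name, constructor arguments, checkpoint path, and shapes are hypothetical placeholders, not taken from the project):

    x_test = np.random.rand(1, 572, 572, 1)       # [n, nx, ny, channels]
    net = Unet(channels=1, n_class=2)             # hypothetical constructor
    prediction = net.predict('./unet_trained/model.ckpt', x_test)
    print(prediction.shape)                       # [n, px, py, labels]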
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def start(self, restore=False):
        self._sess = tf.Session()
        self._init = tf.global_variables_initializer()
        self._saver = tf.train.Saver()

        self._summary = tf.summary.merge_all()
        self._summary_writer = tf.summary.FileWriter(self.config.model_save_path, graph=self._sess.graph)
        self._summary_writer.flush()

        self._sess.run(self._init)

        if restore:
            checkpoint = tf.train.get_checkpoint_state(self.config.model_save_path)
            if checkpoint and checkpoint.model_checkpoint_path:
                self._saver.restore(self._sess, checkpoint.model_checkpoint_path)  # tf.train has no restore(); use the Saver

        self._started = True
Project: DeepWorks    Author: daigo0927    | project source | file source
def __init__(self,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
Project: DeepWorks    Author: daigo0927    | project source | file source
def _build_graph(self, image_size):

        self.image_size = image_size
        self.images = tf.placeholder(tf.float32,
                                     shape = (None, image_size, image_size, 3))
        images_mini = tf.image.resize_images(self.images,
                                             size = (int(image_size/4),
                                                     int(image_size/4)))
        self.images_blur = tf.image.resize_images(images_mini,
                                                  size = (image_size, image_size))

        self.net = U_Net(output_ch = 3, block_fn = 'origin')
        self.images_reconst = self.net(self.images_blur, reuse = False)
        # self.images_reconst can take values in (-inf, +inf), so clip it before visualizing as images.
        self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
        self.opt = tf.train.AdamOptimizer()\
                           .minimize(self.loss, var_list = self.net.vars)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
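A hedged sketch of the clipping that the comment above refers to, as it might appear in a method of this class (assuming images scaled to [0, 1]; `batch` is an illustrative NumPy array of images):

    reconst = self.sess.run(self.images_reconst, feed_dict={self.images: batch})
    reconst = np.clip(reconst, 0.0, 1.0)  # clamp to a displayable range before plotting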
Project: DeepWorks    Author: daigo0927    | project source | file source
def __init__(self,
                 label_size,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.label_size = label_size
        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim+label_size,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
Project: kaggle-review    Author: daxiongshu    | project source | file source
def predictPL(self):
        B = self.flags.batch_size
        W,H,C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32,shape=[None,H,W,C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs,imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit,feed_dict={inputs:imgs})
                np.save("%s/%d.npy"%(self.flags.pred_path,counter),{"pred":pred,"name":imgnames})
                counter+=len(imgs)
                if counter // B % 10 == 0:  # integer division; counter/B is a float in Python 3
                    print_mem_time("%d images predicted"%counter)

    # train with placeholders
Project: kaggle-review    Author: daxiongshu    | project source | file source
def predict_from_placeholder(self,activation=None):
        self._build()
        self._get_summary()
        if activation is not None:
            self.logit = self._activate(self.logit,activation)
        with open(self.flags.pred_path,'w') as f:
            pass
        count = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            if self.flags.log_path and self.flags.visualize is not None:
                summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
            for batch in self._batch_gen_test():
                x,_,epoch = batch
                if self.flags.log_path and self.flags.visualize is not None:
                    summary,pred = sess.run([self.summ_op,self.logit],feed_dict={self.inputs:x,self.is_training:0})
                    summary_writer.add_summary(summary, count)
                else:
                    pred = sess.run(self.logit,feed_dict={self.inputs:x,self.is_training:0})
                count+=1
                if count%self.flags.verbosity == 0:
                    print_mem_time("Epoch %d Batch %d "%(epoch,count))
                self.write_pred(pred)
Project: text_classification    Author: brightmart    | project source | file source
def test():
    # below is a function test; to use this for text classification, first transform sentences into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: text_classification    Author: brightmart    | project source | file source
def test():
    # below is a function test; to use this for text classification, first transform sentences into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: text_classification    Author: brightmart    | project source | file source
def test():
    # below is a function test; to use this for text classification, first transform sentences into vocabulary indices, then feed the data to the graph.
    num_classes=19
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1
    fastText=fastTextB(num_classes, learning_rate, batch_size, decay_steps, decay_rate,5,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length),dtype=np.int32) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1], dtype=np.int32)  # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([fastText.loss_val,fastText.accuracy,fastText.predictions,fastText.train_op],
                                        feed_dict={fastText.sentence:input_x,fastText.labels:input_y})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: text_classification    Author: brightmart    | project source | file source
def test():
    # below is a function test; to use this for text classification, first transform sentences into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # labels, shape [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
Project: tfutils    Author: neuroailab    | project source | file source
def setUp(self):
        """Set up class before _each_ test method is executed.

        Creates a tensorflow session and instantiates a dbinterface.

        """
        self.setup_model()
        self.sess = tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(allow_growth=True),
                log_device_placement=self.params['log_device_placement'],
                inter_op_parallelism_threads=self.params['inter_op_parallelism_threads']))

        # TODO: Determine whether this should be called here or
        # in dbinterface.initialize()
        self.sess.run(tf.global_variables_initializer())

        self.dbinterface = base.DBInterface(sess=self.sess,
                                            params=self.params,
                                            cache_dir=self.CACHE_DIR,
                                            save_params=self.save_params,
                                            load_params=self.load_params)

        self.step = 0
Project: NTM-One-Shot-TF    Author: hmishra2250    | project source | file source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print('Hello')"""

    def update_tensor(V, dim2, val):  # replace V[i, dim2[i]] with val[i] for each row i
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            v, d2, chg = elems  # Python 3 removed tuple parameter unpacking
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z
Project: onsager_deep_learning    Author: mborgerding    | project source | file source
def show_shrinkage(shrink_func,theta,**kwargs):
    tf.reset_default_graph()
    tf.set_random_seed(kwargs.get('seed',1) )

    N = kwargs.get('N',500)
    L = kwargs.get('L',4)
    nsigmas = kwargs.get('sigmas',10)
    shape = (N,L)
    rvar = 1e-4
    r = np.reshape( np.linspace(0,nsigmas,N*L)*math.sqrt(rvar),shape)
    r_ = tfcf(r)
    rvar_ = tfcf(np.ones(L)*rvar)

    xhat_,dxdr_ = shrink_func(r_,rvar_ ,tfcf(theta))

    with tf.Session() as sess:
        sess.run( tf.global_variables_initializer() )
        xhat = sess.run(xhat_)
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(r.reshape(-1),r.reshape(-1),'y')
    plt.plot(r.reshape(-1),xhat.reshape(-1),'b')
    if 'title' in kwargs:  # dict.has_key() was removed in Python 3
        plt.suptitle(kwargs['title'])
    plt.show()
Project: PyMDNet    Author: HungWei-Andy    | project source | file source
def tracking(dataset, seq, display, restore_path):
  train_data = reader.read_seq(dataset, seq)
  im_size = proc.load_image(train_data.data[seq].frames[0]).shape[:2]
  config = Config(im_size)

  # create session and saver
  gpu_config = tf.ConfigProto(allow_soft_placement=True)
  sess = tf.InteractiveSession(config=gpu_config)

  # load model, weights
  model = MDNet(config)
  model.build_generator(config.batch_size, reuse=False, dropout=True)
  tf.global_variables_initializer().run()

  # create saver
  saver = tf.train.Saver([v for v in tf.global_variables() if ('conv' in v.name or 'fc4' in v.name or 'fc5' in v.name) \
                          and 'lr_rate' not in v.name], max_to_keep=50)

  # restore from model
  saver.restore(sess, restore_path)

  # run mdnet
  mdnet_run(sess, model, train_data.data[seq].gts[0], train_data.data[seq].frames, config, display)
Project: handwritten-sequence-tensorflow    Author: johnsmithm    | project source | file source
def fast_run(args):
    model = Model(args)
    feed = {}
    #feed[model.train_batch]=False
    xx,ss,yy=model.inputs(args.input_path)

    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    xxx,sss,yyy=sess.run([xx,ss,yy])
    #print(yyy)
    #print(yyy[1])
    print('len:',xxx.shape)
    import matplotlib.cm as cm
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.figure(figsize=(16,4))
    #plt.imshow()
    plt.imshow(np.asarray(xxx[0]).reshape((36,90))+0.5, interpolation='nearest', aspect='auto', cmap=cm.jet)
    plt.savefig("img.jpg")
    plt.clf() ; plt.cla()
Project: yt8m    Author: forwchen    | project source | file source
def export_model(self, model_dir, global_step_val, last_checkpoint):
    """Exports the model so that it can used for batch predictions."""

    with self.graph.as_default():
      with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        self.saver.restore(session, last_checkpoint)

        signature = signature_def_utils.build_signature_def(
            inputs=self.inputs,
            outputs=self.outputs,
            method_name=signature_constants.PREDICT_METHOD_NAME)

        signature_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                         signature}

        model_builder = saved_model_builder.SavedModelBuilder(model_dir)
        model_builder.add_meta_graph_and_variables(session,
            tags=[tag_constants.SERVING],
            signature_def_map=signature_map,
            clear_devices=True)
        model_builder.save()
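For context, a minimal sketch of loading the exported model back with the TF1 SavedModel loader (`model_dir` matches the directory passed to export_model above):

    with tf.Session(graph=tf.Graph()) as load_sess:
        tf.saved_model.loader.load(load_sess, [tag_constants.SERVING], model_dir)
        # the input/output tensors registered in the signature can now be
        # looked up by name in load_sess.graph and run for batch predictions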
Project: DNC    Author: bgavran    | project source | file source
def test_link_matrix(self):
        b, n = 2, 5
        write_weighting = np.random.rand(b, n)
        precedence_weighting = np.random.rand(b, n)  # precedence weighting from previous time step
        link_matrix_old = np.random.rand(b, n, n) * (
            1 - np.tile(np.eye(5), [b, 1, 1]))  # random link matrix with diagonals zero
        link_matrix_correct = np.zeros((b, n, n))
        for k in range(b):
            for i in range(n):
                for j in range(n):
                    if i != j:
                        link_matrix_correct[k, i, j] = (1 - write_weighting[k, i] - write_weighting[k, j]) * \
                                                       link_matrix_old[k, i, j] + \
                                                       write_weighting[k, i] * precedence_weighting[k, j]

        with self.test_session():
            tf.global_variables_initializer().run()
            Memory.batch_size = b
            Memory.memory_size = n
            new_link_matrix = Memory.update_link_matrix(Memory,
                                                        tf.constant(link_matrix_old, dtype=tf.float32),
                                                        tf.constant(precedence_weighting, dtype=tf.float32),
                                                        tf.constant(write_weighting, dtype=tf.float32))
            self.assertAllClose(link_matrix_correct, new_link_matrix.eval())
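In equation form, the nested loop above checks the DNC temporal link-matrix update, where w is the write weighting and p the precedence weighting from the previous step:

    L_t[i, j] = (1 - w_t[i] - w_t[j]) * L_{t-1}[i, j] + w_t[i] * p_{t-1}[j],   with the diagonal L_t[i, i] fixed at 0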
Project: cleverhans    Author: tensorflow    | project source | file source
def setUp(self):
        super(TestVirtualAdversarialMethod, self).setUp()
        import tensorflow as tf
        import tensorflow.contrib.slim as slim

        def dummy_model(x):
            net = slim.fully_connected(x, 60)
            return slim.fully_connected(net, 10, activation_fn=None)

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = tf.make_template('dummy_model', dummy_model)
        self.attack = VirtualAdversarialMethod(self.model, sess=self.sess)

        # initialize model
        with tf.name_scope('dummy_model'):
            self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
        self.sess.run(tf.global_variables_initializer())
Project: cleverhans    Author: tensorflow    | project source | file source
def setUp(self):
        super(TestSaliencyMapMethod, self).setUp()
        import tensorflow as tf
        import tensorflow.contrib.slim as slim

        def dummy_model(x):
            net = slim.fully_connected(x, 60)
            return slim.fully_connected(net, 10, activation_fn=None)

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = tf.make_template('dummy_model', dummy_model)
        self.attack = SaliencyMapMethod(self.model, sess=self.sess)

        # initialize model
        with tf.name_scope('dummy_model'):
            self.model(tf.placeholder(tf.float32, shape=(None, 1000)))
        self.sess.run(tf.global_variables_initializer())

        self.attack = SaliencyMapMethod(self.model, sess=self.sess)
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v2(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v2(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Project: tf_classification    Author: visipedia    | project source | file source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v2(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: tf_classification    Author: visipedia    | project source | file source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_resnet_v2(train_inputs, num_classes)
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_resnet_v2(eval_inputs,
                                                num_classes,
                                                is_training=False,
                                                reuse=True)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknownBatchSize(self):
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      logits, _ = self._resnet_small(inputs, num_classes,
                                     global_pool=global_pool,
                                     spatial_squeeze=False,
                                     scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))
Project: tf_classification    Author: visipedia    | project source | file source
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs,
                                     None,
                                     global_pool=global_pool,
                                     output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(),
                         [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Conv2d_13_pointwise']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Project: tf_classification    Author: visipedia    | project source | file source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
                                          reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v3(inputs, num_classes)
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_7c']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Project: tf_classification    Author: visipedia    | project source | file source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_v4(train_inputs, num_classes)
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_v4(eval_inputs,
                                         num_classes,
                                         is_training=False,
                                         reuse=True)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: tf_classification    Author: visipedia    | project source | file source
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception.inception_v1(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      tf.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Project: tf_classification    Author: visipedia    | project source | file source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v1(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: fold    Author: tensorflow    | project source | file source
def test_fractalnet_smoketest(self):
    input_placeholder = tf.placeholder(tf.float32, [None, 3])
    output_placeholder = tf.placeholder(tf.float32, [None, 3])
    fractal_net = tdl.FractalNet(3, 2, lambda name: tdl.FC(3, name=name))
    result = fractal_net(input_placeholder)
    loss = tf.nn.l2_loss(result - output_placeholder)
    optr = tf.train.GradientDescentOptimizer(0.001)
    trainer = optr.minimize(loss)

    dataset = np.random.standard_normal([10, 3])
    answers = np.random.standard_normal([10, 3])

    feed_dict = {input_placeholder: dataset, output_placeholder: answers}
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      old_loss = loss.eval(feed_dict)
      for unused_iteration in range(20):
        sess.run([trainer], feed_dict)
      new_loss = loss.eval(feed_dict)
      self.assertLess(new_loss, old_loss)