Python tensorflow module: InteractiveSession() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.InteractiveSession().
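
Before the project snippets, here is a minimal sketch of the basic pattern (assuming the TF 1.x API; the tensors a and b below are purely illustrative). An InteractiveSession installs itself as the default session, so Tensor.eval() and Operation.run() work without passing the session explicitly:

import tensorflow as tf

sess = tf.InteractiveSession()

a = tf.constant([1.0, 2.0, 3.0])
b = tf.Variable(tf.zeros([3]))

# Both calls implicitly use the default (interactive) session.
tf.global_variables_initializer().run()
print((a + b).eval())

sess.close()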

Project: How-to-Learn-from-Little-Data    Author: llSourcell    | Project source | File source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index(:,dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            # Unpack explicitly; tuple parameters in a def are Python-2-only syntax.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z
Project: NTM-One-Shot-TF    Author: hmishra2250    | Project source | File source
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V, with index(:,dim2[:]) by val[:]
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            # Unpack explicitly; tuple parameters in a def are Python-2-only syntax.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0), [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val), initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32), name="Scan_Update")
        return Z
Project: PyMDNet    Author: HungWei-Andy    | Project source | File source
def tracking(dataset, seq, display, restore_path):
  train_data = reader.read_seq(dataset, seq)
  im_size = proc.load_image(train_data.data[seq].frames[0]).shape[:2]
  config = Config(im_size)

  # create session and saver
  gpu_config = tf.ConfigProto(allow_soft_placement=True)
  sess = tf.InteractiveSession(config=gpu_config)

  # load model, weights
  model = MDNet(config)
  model.build_generator(config.batch_size, reuse=False, dropout=True)
  tf.global_variables_initializer().run()

  # create saver
  saver = tf.train.Saver([v for v in tf.global_variables() if ('conv' in v.name or 'fc4' in v.name or 'fc5' in v.name) \
                          and 'lr_rate' not in v.name], max_to_keep=50)

  # restore from model
  saver.restore(sess, restore_path)

  # run mdnet
  mdnet_run(sess, model, train_data.data[seq].gts[0], train_data.data[seq].frames, config, display)
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_sgld_sparse(self):
        tf.reset_default_graph()

        z     = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
        idx   = tf.placeholder(tf.int32)
        zi    = tf.gather(z, idx)
        zloss = tf.square(zi - [10.0, 5.0])

        sgld = SGLD(learning_rate=0.4)
        train_op_sgld = sgld.minimize(zloss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        self.assertTrue(np.alltrue(sess.run(z) == 0.0))

        sess.run(train_op_sgld, feed_dict={idx: 3})
        zh = sess.run(z)
        self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
        self.assertTrue(zh[3, 0] > 0)
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_psgld_sparse(self):
        tf.reset_default_graph()

        z     = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
        idx   = tf.placeholder(tf.int32)
        zi    = tf.gather(z, idx)
        zloss = tf.square(zi - [10.0, 5.0])

        psgld = pSGLD(learning_rate=0.4)
        train_op_psgld = psgld.minimize(zloss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        self.assertTrue(np.alltrue(sess.run(z) == 0.0))

        sess.run(train_op_psgld, feed_dict={idx: 3})
        zh = sess.run(z)
        self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
        self.assertTrue(zh[3, 0] > 0)
Project: -NIPS-2017-Learning-to-Run    Author: kyleliang919    | Project source | File source
def __init__(self, env):
        self.name = 'DDPG' # name for uploading results
        self.environment = env
        # Randomly initialize actor network and critic network
        # with both their target networks
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

        self.sess = tf.InteractiveSession()

        self.actor_network = ActorNetwork(self.sess,self.state_dim,self.action_dim)
        self.critic_network = CriticNetwork(self.sess,self.state_dim,self.action_dim)

        # initialize replay buffer
        self.replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE)

        # Initialize a random process the Ornstein-Uhlenbeck process for action exploration
        self.exploration_noise = OUNoise(self.action_dim)

        self.saver = tf.train.Saver()
Project: -NIPS-2017-Learning-to-Run    Author: kyleliang919    | Project source | File source
def __init__(self, env):
        self.name = 'RDPG' # name for uploading results
        self.environment = env
        # Randomly initialize actor network and critic network
        # with both their target networks
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

        self.sess = tf.InteractiveSession()

        self.actor_network = ActorNetwork(self.sess,self.state_dim,self.action_dim)
        self.critic_network = CriticNetwork(self.sess,self.state_dim,self.action_dim)

        # initialize replay buffer
        self.replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE)

        # Initialize a random process the Ornstein-Uhlenbeck process for action exploration
        self.exploration_noise = OUNoise(self.action_dim)

        self.saver = tf.train.Saver()
Project: hyperchamber    Author: 255BITS    | Project source | File source
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus, 
                 learning_rate=0.001, batch_size=100):
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size

        # tf Graph input
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])

        # Create autoencoder network
        self._create_network()
        # Define loss function based variational upper-bound and 
        # corresponding optimizer
        self._create_loss_optimizer()

        # Initializing the tensor flow variables
        init = tf.initialize_all_variables()

        # Launch the session
        self.sess = tf.InteractiveSession()
        self.sess.run(init)
Project: merlin    Author: CSTR-Edinburgh    | Project source | File source
def __init__ (self,n_in,hidden_layer_size,n_out,hidden_layer_type,output_type="linear",dropout_rate=0,loss_function="mse",optimizer="adam"):

        #self.session=tf.InteractiveSession()
        self.n_in  = int(n_in)
        self.n_out = int(n_out)

        self.n_layers = len(hidden_layer_size)

        self.hidden_layer_size = hidden_layer_size
        self.hidden_layer_type = hidden_layer_type

        assert len(self.hidden_layer_size) == len(self.hidden_layer_type)

        self.output_type   = output_type
        self.dropout_rate  = dropout_rate
        self.loss_function = loss_function
        self.optimizer     = optimizer
        #self.activation    ={"tanh":tf.nn.tanh,"sigmoid":tf.nn.sigmoid}
        self.graph=tf.Graph()
        #self.saver=tf.train.Saver()
Project: Deep-learning-Colorization-for-visual-media    Author: OmarSayedMostafa    | Project source | File source
def UserInteract(Model):
    global sess, ColorizationModel
    sess = tf.InteractiveSession() 
    ColorizationModel = Model

    print("")
    print("")

    print("         ---- DEEP LEARNING COLORIZATION FOR VISUAL MEDIA ----           ")

    print("")
    print(" Enter (T) if you want to continue Training, or")
    print(" Enter (t) if you want to Test an Image, or")
    userInput = input(" Enter(X) to exit   :   ")
    if(userInput =='T'):
        UserInteract_train()
    elif(userInput =='t'):
        UserInteract_test()
    else:
        print("Exit ... ")
Project: Malware-Classification    Author: consteax    | Project source | File source
def tensorFlowBasic(X_train, y_train, X_val, y_val, X_test, y_test):
    sess = tf.InteractiveSession()
    x = tf.placeholder("float", shape=[None, 400])
    y_ = tf.placeholder("float", shape=[None, 10])
    W = tf.Variable(tf.zeros([400, 10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.initialize_all_variables())
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    mydata = read_data_sets(X_train, y_train, X_val, y_val, X_test, y_test)

    for i in range(1000):
        batch = mydata.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    return accuracy.eval(feed_dict={x: mydata.test.images, y_: mydata.test.labels})
Project: instacart-basket-prediction    Author: colinmorris    | Project source | File source
def get_eval_df(checkpoint_path="checkpoints/jul1", config='jul1.json'):
  hps = hypers.get_default_hparams()
  if config:
    with open(config) as f:
      hps.parse_json(f.read())
  hps.is_training = False
  hps.batch_size = 1
  tf.logging.info('Creating model')
  model = rnnmodel.RNNModel(hps)

  sess = tf.InteractiveSession()
  # Load pretrained weights
  tf.logging.info('Loading weights')
  utils.load_checkpoint(sess, checkpoint_path)

  tf.logging.info('Loading test set')
  user_pb = User()
  with open('testuser.pb', 'rb') as f:  # binary mode: ParseFromString expects bytes
    user_pb.ParseFromString(f.read())
  user = UserWrapper(user_pb)

  predictor = pred.RnnModelPredictor(sess, model, .2, predict_nones=0)
  df = _get_df(user, predictor)
  return df
Project: instacart-basket-prediction    Author: colinmorris    | Project source | File source
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('tag')
  args = parser.parse_args()
  tag = args.tag

  hps = hypers.hps_for_tag(tag)
  hps.is_training = 0
  hps.batch_size = 1
  # (dummy dataset, just so we have some placeholder values for the rnnmodel's input vars)
  dat = dataset.BasketDataset(hps, 'unit_tests.tfrecords')
  model = rnnmodel.RNNModel(hps, dat)
  sess = tf.InteractiveSession()
  utils.load_checkpoint_for_tag(tag, sess)

  def lookup(varname):
    with tf.variable_scope('instarnn', reuse=True):
        var = tf.get_variable(varname)
    val = sess.run(var)
    return val

  emb = lookup('product_embeddings')
  outpath = path_for_cached_embeddings(tag)
  np.save(outpath, emb)
  print('Saved embeddings with shape {} to {}'.format(emb.shape, outpath))
Project: instacart-basket-prediction    Author: colinmorris    | Project source | File source
def precompute_probs_for_tag(tag, userfold):
  hps = hypers.hps_for_tag(tag, mode=hypers.Mode.inference)
  tf.logging.info('Creating model')
  dat = BasketDataset(hps, userfold)
  model = rnnmodel.RNNModel(hps, dat)
  sess = tf.InteractiveSession()
  # Load pretrained weights
  tf.logging.info('Loading weights')
  utils.load_checkpoint_for_tag(tag, sess)
  # TODO: deal with 'test mode'
  tf.logging.info('Calculating probabilities')
  probmap = get_probmap(model, sess)
  # Hack because of silly reasons.
  if userfold == 'validation_full':
    userfold = 'validation'
  common.save_pdict_for_tag(tag, probmap, userfold)
  sess.close()
  tf.reset_default_graph()
  return probmap
Project: deligan    Author: val-iisc    | Project source | File source
def serialize_cifar_pool3(X,filename):
    print('About to generate file: %s' % filename)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
    X_pool3 = batch_pool3_features(sess,X)
    np.save(filename,X_pool3)
Project: ddpg-aigym    Author: stevenpjg    | Project source | File source
def __init__(self, action_bounds):

        self.sess = tf.InteractiveSession()       

        self.action_size = len(action_bounds[0])

        self.action_input = tf.placeholder(tf.float32, [None, self.action_size])
        self.pmax = tf.constant(action_bounds[0], dtype = tf.float32)
        self.pmin = tf.constant(action_bounds[1], dtype = tf.float32)
        self.prange = tf.constant([x - y for x, y in zip(action_bounds[0],action_bounds[1])], dtype = tf.float32)
        self.pdiff_max = tf.div(-self.action_input+self.pmax, self.prange)
        self.pdiff_min = tf.div(self.action_input - self.pmin, self.prange)
        self.zeros_act_grad_filter = tf.zeros([self.action_size])
        self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])
        self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter), tf.mul(self.act_grad, self.pdiff_max), tf.mul(self.act_grad, self.pdiff_min))
Project: ddpg-aigym    Author: stevenpjg    | Project source | File source
def __init__(self,num_states,num_actions):
        self.g=tf.Graph()
        with self.g.as_default():
            self.sess = tf.InteractiveSession()


            #actor network model parameters:
            self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a,\
            self.actor_state_in, self.actor_model = self.create_actor_net(num_states, num_actions)


            #target actor network model parameters:
            self.t_W1_a, self.t_B1_a, self.t_W2_a, self.t_B2_a, self.t_W3_a, self.t_B3_a,\
            self.t_actor_state_in, self.t_actor_model = self.create_actor_net(num_states, num_actions)

            #cost of actor network:
            self.q_gradient_input = tf.placeholder("float",[None,num_actions]) #gets input from action_gradient computed in critic network file
            self.actor_parameters = [self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a]
            self.parameters_gradients = tf.gradients(self.actor_model,self.actor_parameters,-self.q_gradient_input)#/BATCH_SIZE) 
            self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(zip(self.parameters_gradients,self.actor_parameters))  
            #initialize all tensor variable parameters:
            self.sess.run(tf.initialize_all_variables())    

            #To make sure actor and target have same intial parmameters copy the parameters:
            # copy target parameters
            self.sess.run([
                self.t_W1_a.assign(self.W1_a),
                self.t_B1_a.assign(self.B1_a),
                self.t_W2_a.assign(self.W2_a),
                self.t_B2_a.assign(self.B2_a),
                self.t_W3_a.assign(self.W3_a),
                self.t_B3_a.assign(self.B3_a)])

            self.update_target_actor_op = [
                self.t_W1_a.assign(TAU*self.W1_a+(1-TAU)*self.t_W1_a),
                self.t_B1_a.assign(TAU*self.B1_a+(1-TAU)*self.t_B1_a),
                self.t_W2_a.assign(TAU*self.W2_a+(1-TAU)*self.t_W2_a),
                self.t_B2_a.assign(TAU*self.B2_a+(1-TAU)*self.t_B2_a),
                self.t_W3_a.assign(TAU*self.W3_a+(1-TAU)*self.t_W3_a),
                self.t_B3_a.assign(TAU*self.B3_a+(1-TAU)*self.t_B3_a)]
Project: sketch_rnn_classification    Author: payalbajaj    | Project source | File source
def trainer(model_params):
  """Train a sketch-rnn model."""
  np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)

  tf.logging.info('sketch-rnn')
  tf.logging.info('Hyperparams:')
  for key, val in model_params.values().items():
    tf.logging.info('%s = %s', key, str(val))
  tf.logging.info('Loading data files.')
  datasets = load_dataset(FLAGS.data_dir, model_params)

  train_set = datasets[0]
  valid_set = datasets[1]
  test_set = datasets[2]
  model_params = datasets[3]
  eval_model_params = datasets[4]

  reset_graph()
  model = sketch_rnn_model.Model(model_params)
  eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)

  sess = tf.InteractiveSession()
  sess.run(tf.global_variables_initializer())

  if FLAGS.resume_training:
    load_checkpoint(sess, FLAGS.log_root)

  # Write config file to json file.
  tf.gfile.MakeDirs(FLAGS.log_root)
  with tf.gfile.Open(
      os.path.join(FLAGS.log_root, 'model_config.json'), 'w') as f:
    json.dump(model_params.values(), f, indent=True)

  train(sess, model, eval_model, train_set, valid_set, test_set)
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def createPiNetwork(self, player):
        # input layer
        self.stateInput = tf.placeholder(tf.float32, shape=[None, self.STATE_NUM])
        self.actionOutput = tf.placeholder(tf.float32, shape=[None, self.ACTION_NUM])

        # weights
        W1 = self.weight_variable([self.STATE_NUM, 256])
        b1 = self.bias_variable([256])

        W2 = self.weight_variable([256, 512])
        b2 = self.bias_variable([512])

        W3 = self.weight_variable([512, self.ACTION_NUM])
        b3 = self.bias_variable([self.ACTION_NUM])

        # layers
        h_layer1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(self.stateInput, W1), b1))
        # h_layer1 = self.batch_norm(h_layer1)
        h_layer2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(h_layer1, W2), b2))
        # h_layer2 = self.batch_norm(h_layer2)
        self.output = tf.nn.bias_add(tf.matmul(h_layer2, W3), b3)
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.actionOutput, logits=self.output))
        self.out = tf.nn.softmax(self.output)
        self.trainStep = tf.train.GradientDescentOptimizer(1e-2).minimize(self.cost)

        # saving and loading networks
        self.saver = tf.train.Saver()
        self.session = tf.InteractiveSession()
        checkpoint = tf.train.get_checkpoint_state('saved_PiNetworks_' + player + '/')
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.session, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")
            self.session.run(tf.initialize_all_variables())
Project: RL_NFSP    Author: Richard-An    | Project source | File source
def createQNetwork(self, player):
        # input layer
        self.stateInput = tf.placeholder(dtype=tf.float32, shape=[None, self.STATE_NUM])
        self.actionInput = tf.placeholder(dtype=tf.float32, shape=[None, self.ACTION_NUM])
        self.yInput = tf.placeholder(dtype=tf.float32, shape=[None])

        # weights
        W1 = self.weight_variable([self.STATE_NUM, 256])
        b1 = self.bias_variable([256])

        W2 = self.weight_variable([256, 512])
        b2 = self.bias_variable([512])

        W3 = self.weight_variable([512, self.ACTION_NUM])
        b3 = self.bias_variable([self.ACTION_NUM])

        # layers
        h_layer1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(self.stateInput, W1), b1))
        # h_layer1 = self.batch_norm(h_layer1)
        h_layer2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(h_layer1, W2), b2))
        # h_layer2 = self.batch_norm(h_layer2)
        self.QValue = tf.nn.bias_add(tf.matmul(h_layer2, W3), b3)
        self.QValue = tf.nn.softmax(self.QValue)
        Q_action = tf.reduce_sum(tf.multiply(self.QValue, self.actionInput), reduction_indices=-1)
        self.cost = tf.reduce_mean(tf.square(self.yInput - Q_action))
        self.trainStep = tf.train.GradientDescentOptimizer(1e-6).minimize(self.cost)

        # saving and loading networks
        self.saver = tf.train.Saver()
        self.session = tf.InteractiveSession()
        checkpoint = tf.train.get_checkpoint_state('saved_QNetworks_new_' + player + '/')
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.session, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")
            self.session.run(tf.initialize_all_variables())
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | Project source | File source
def __init__(self):
        # init some parameters
        self.replay_buffer = deque()

        self.time_step = 0

        self.epsilon = START_EPSILON

        self.state_dim = input_dim

        self.action_dim = num_output

        #initialize weights and biases of deep q net
        self.weights = {
            'w1': tf.Variable(tf.random_normal([3, 3, 2, 150])),
            'w2': tf.Variable(tf.random_normal([1, 1, 150, 1])),
            'w3': tf.Variable(tf.random_normal([3,3,1,10])),
            'out': tf.Variable(tf.random_normal([dim*dim*10, num_output]))
        }

        self.biases = {
            'b1': tf.Variable(tf.random_normal([150])),
            'b2': tf.Variable(tf.random_normal([1])),
            'b3': tf.Variable(tf.random_normal([10])),
            'out': tf.Variable(tf.random_normal([num_output]))
        }
        self.state_input = tf.placeholder("float",[None, self.state_dim[0] * self.state_dim[1], 2])
        keep_prob = tf.placeholder(tf.float32) # dropout probability

        #create deep q network
        self.deep_q_network(self.state_input, self.weights, self.biases, keep_prob)
        self.training_rules()

        # Initialize session
        self.session = tf.InteractiveSession()
        self.session.run(tf.initialize_all_variables())

        # saver
        self.saver = tf.train.Saver()
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | Project source | File source
def __init__(self):
        # init some parameters
        self.replay_buffer = deque()

        self.time_step = 0

        self.epsilon = START_EPSILON

        self.state_dim = input_dim

        self.action_dim = num_output

        #initialize weights and biases of deep q net
        self.weights = {
            'w1': tf.Variable(tf.random_normal([3, 3, 2, 150])),
            'w2': tf.Variable(tf.random_normal([1, 1, 150, 1])),
            'w3': tf.Variable(tf.random_normal([3,3,1,10])),
            'out': tf.Variable(tf.random_normal([dim*dim*10, num_output]))
        }

        self.biases = {
            'b1': tf.Variable(tf.random_normal([150])),
            'b2': tf.Variable(tf.random_normal([1])),
            'b3': tf.Variable(tf.random_normal([10])),
            'out': tf.Variable(tf.random_normal([num_output]))
        }
        self.state_input = tf.placeholder("float",[None, self.state_dim[0] * self.state_dim[1], 2])
        keep_prob = tf.placeholder(tf.float32) # dropout probability

        #create deep q network
        self.deep_q_network(self.state_input, self.weights, self.biases, keep_prob)
        self.training_rules()

        # Initialize session
        self.session = tf.InteractiveSession()
        self.session.run(tf.initialize_all_variables())

        # saver
        self.saver = tf.train.Saver()
Project: Learning-to-navigate-without-a-map    Author: ToniRV    | Project source | File source
def __init__(self):
        # init some parameters
        self.replay_buffer = deque()

        self.time_step = 0

        self.epsilon = START_EPSILON

        self.state_dim = input_dim

        self.action_dim = num_output

        #initialize weights and biases of deep q net
        self.weights = {
            'w1': tf.Variable(tf.random_normal([3, 3, 2, 150])),
            'w2': tf.Variable(tf.random_normal([1, 1, 150, 1])),
            'w3': tf.Variable(tf.random_normal([3,3,1,10])),
            'out': tf.Variable(tf.random_normal([dim*dim*10, num_output]))
        }

        self.biases = {
            'b1': tf.Variable(tf.random_normal([150])),
            'b2': tf.Variable(tf.random_normal([1])),
            'b3': tf.Variable(tf.random_normal([10])),
            'out': tf.Variable(tf.random_normal([num_output]))
        }
        self.state_input = tf.placeholder("float",[None, self.state_dim[0] * self.state_dim[1], 2])
        keep_prob = tf.placeholder(tf.float32) # dropout probability

        #create deep q network
        self.deep_q_network(self.state_input, self.weights, self.biases, keep_prob)
        self.training_rules()

        # Initialize session
        self.session = tf.InteractiveSession()
        self.session.run(tf.initialize_all_variables())

        # saver
        self.saver = tf.train.Saver()
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def start_bundle(self, context=None):
    # There is one tensorflow session per instance of TFExampleFromImageDoFn.
    # The same instance of session is re-used between bundles.
    # Session is closed by the destructor of Session object, which is called
    # when instance of TFExampleFromImageDoFn() is destructed.
    if not self.graph:
      self.graph = tf.Graph()
      self.tf_session = tf.InteractiveSession(graph=self.graph)
      with self.graph.as_default():
        self.preprocess_graph = EmbeddingsGraph(self.tf_session)
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | Project source | File source
def play():
    sess = tf.InteractiveSession()
    nn,optimizer=createNetwort()
    trainDQN(nn,optimizer,sess)
Project: django-corenlp    Author: arunchaganty    | Project source | File source
def setUpClass(cls):
        cls.sess = tf.InteractiveSession()
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_sgld_dense(self):
        tf.reset_default_graph()

        x = tf.Variable(tf.zeros(20), dtype=tf.float32)
        loss = tf.reduce_sum(tf.square(x - 10))

        sgld = SGLD(learning_rate=0.4)
        train_op_sgld = sgld.minimize(loss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        sess.run(train_op_sgld)
        xh = sess.run(x)
        self.assertTrue(5.0 <= xh.mean() and xh.mean() <= 11.0)
Project: chemblnet    Author: jaak-s    | Project source | File source
def test_psgld_dense(self):
        tf.reset_default_graph()

        x = tf.Variable(tf.zeros(20), dtype=tf.float32)
        loss = tf.reduce_sum(tf.square(x - 10))

        psgld = pSGLD(learning_rate=1.0)
        train_op_psgld = psgld.minimize(loss)

        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        sess.run(train_op_psgld)
        xh = sess.run(x)
Project: generating_sequences    Author: PFCM    | Project source | File source
def setup_session():
    """Clears the default graph and starts an interactive session"""
    tf.reset_default_graph()
    tf.InteractiveSession()
Project: magenta    Author: tensorflow    | Project source | File source
def trainer(model_params):
  """Train a sketch-rnn model."""
  np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)

  tf.logging.info('sketch-rnn')
  tf.logging.info('Hyperparams:')
  for key, val in six.iteritems(model_params.values()):
    tf.logging.info('%s = %s', key, str(val))
  tf.logging.info('Loading data files.')
  datasets = load_dataset(FLAGS.data_dir, model_params)

  train_set = datasets[0]
  valid_set = datasets[1]
  test_set = datasets[2]
  model_params = datasets[3]
  eval_model_params = datasets[4]

  reset_graph()
  model = sketch_rnn_model.Model(model_params)
  eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)

  sess = tf.InteractiveSession()
  sess.run(tf.global_variables_initializer())

  if FLAGS.resume_training:
    load_checkpoint(sess, FLAGS.log_root)

  # Write config file to json file.
  tf.gfile.MakeDirs(FLAGS.log_root)
  with tf.gfile.Open(
      os.path.join(FLAGS.log_root, 'model_config.json'), 'w') as f:
    json.dump(model_params.values(), f, indent=True)

  train(sess, model, eval_model, train_set, valid_set, test_set)
Project: tensorflow-with-go    Author: nilsmagnus    | Project source | File source
def main(_):
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # GOLANG note that we must label the input-tensor!
  x = tf.placeholder(tf.float32, [None, 784], name="imageinput")
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.add(tf.matmul(x, W) , b)

  y_ = tf.placeholder(tf.float32, [None, 10])

  cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()

  # Train
  for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})


  # GOLANG note that we must label the infer-operation!!
  infer = tf.argmax(y,1, name="infer")

  correct_prediction = tf.equal(infer, tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))

  builder = tf.saved_model.builder.SavedModelBuilder("mnistmodel")

  # GOLANG note that we must tag our model so that we can retrieve it at inference-time
  builder.add_meta_graph_and_variables(sess,["serve"])

  builder.save()
Project: handson-tensorflow    Author: winnietsang    | Project source | File source
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

    # Launch session.
    sess = tf.InteractiveSession()

    # Do the training.
    for i in range(1100):
        batch = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})

    # See how model did.
    print("Test Accuracy %g" % sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                             y_: mnist.test.labels}))
Project: deep_learning_study    Author: jowettcz    | Project source | File source
def main(_):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  # Train
  for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    phase_train = tf.placeholder(tf.bool, name='phase_train')

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    t_1 = tf.matmul(x, w_1) + b_1
    bn = batch_norm(t_1, 1, phase_train)
    h_1 = binarized_ops.binarized(bn)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train, phase_train: True})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test, phase_train: False}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testOverwriteOutput():
    sess = tf.InteractiveSession()
    external_input = [0, 1., 0., 1., 1.]
    graph_input = [-5.5, 4.4, 3.4, -2.3, 1.9]
    result = overwrite_output(graph_input, external_input)
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testBinarized():
    sess = tf.InteractiveSession()
    result = binarized([-5.5, 4.4, 3.4, -2.3, 1.9])
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testOverwriteOutput():
    sess = tf.InteractiveSession()
    external_input = [0, 1., 0., 1., 1.]
    graph_input = [-5.5, 4.4, 3.4, -2.3, 1.9]
    result = overwrite_output(graph_input, external_input)
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testBinarized():
    sess = tf.InteractiveSession()
    result = binarized([-5.5, 4.4, 3.4, -2.3, 1.9])
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    phase_train = tf.placeholder(tf.bool, name='phase_train')

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    t_1 = tf.matmul(x, w_1) + b_1
    bn = batch_norm(t_1, 1, phase_train)
    h_1 = binarized_ops.binarized(bn)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train, phase_train: True})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test, phase_train: False}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testOverwriteOutput():
    sess = tf.InteractiveSession()
    external_input = [0, 1., 0., 1., 1.]
    graph_input = [-5.5, 4.4, 3.4, -2.3, 1.9]
    result = overwrite_output(graph_input, external_input)
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    phase_train = tf.placeholder(tf.bool, name='phase_train')

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    t_1 = tf.matmul(x, w_1) + b_1
    bn = batch_norm(t_1, 1, phase_train)
    h_1 = binarized_ops.binarized(bn)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train, phase_train: True})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test, phase_train: False}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testOverwriteOutput():
    sess = tf.InteractiveSession()
    external_input = [0, 1., 0., 1., 1.]
    graph_input = [-5.5, 4.4, 3.4, -2.3, 1.9]
    result = overwrite_output(graph_input, external_input)
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testBinarized():
    sess = tf.InteractiveSession()
    result = binarized([-5.5, 4.4, 3.4, -2.3, 1.9])
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def main():
    digits = load_digits()
    x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
                                                                           random_state=0)

    lb = preprocessing.LabelBinarizer()
    lb.fit(digits.target)
    y_train = lb.transform(y_train_)
    y_test = lb.transform(y_test_)

    sess = tf.InteractiveSession()

    x = tf.placeholder(tf.float32, shape=[None, 64])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    w_1 = weight_variable([64, 32])
    b_1 = bias_variable([32])
    h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)

    w_2 = weight_variable([32, 10])
    b_2 = bias_variable([10])
    y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)

    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    sess.run(tf.initialize_all_variables())
    for i in range(1000):
        train_step.run(feed_dict={x: x_train, y_: y_train})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testOverwriteOutput():
    sess = tf.InteractiveSession()
    external_input = [0, 1., 0., 1., 1.]
    graph_input = [-5.5, 4.4, 3.4, -2.3, 1.9]
    result = overwrite_output(graph_input, external_input)
    with sess.as_default():
        print(result.eval())
Project: deep-spike    Author: electronicvisions    | Project source | File source
def testBinarized():
    sess = tf.InteractiveSession()
    result = binarized([-5.5, 4.4, 3.4, -2.3, 1.9])
    with sess.as_default():
        print(result.eval())