Python tensorflow.python.ops.rnn module: bidirectional_rnn() code examples

The following 7 code examples, extracted from open-source Python projects, illustrate how tensorflow.python.ops.rnn.bidirectional_rnn() is used.
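
All snippets below target the pre-1.0 TensorFlow API, in which bidirectional_rnn() lives in tensorflow.python.ops.rnn and the cell classes live in tensorflow.python.ops.rnn_cell (in later releases the rough equivalents are tf.nn.bidirectional_dynamic_rnn and tf.contrib.rnn.static_bidirectional_rnn). The extracted snippets omit their imports; a minimal set they all assume would be:

# Imports assumed by the snippets below (TensorFlow 0.x API).
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell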

Project: TensorFlow-DNNs-for-Predicting-DNA-Transcription-Factor-Binding    Author: adwiens
def BiRNN(x, n_input, n_steps, n_hidden):
    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
    return outputs
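
A minimal sketch of how the function above might be driven with the same TF 0.x API; the placeholder shape, layer sizes, and output projection below are illustrative assumptions, not part of the original project:

# Hypothetical driver code for BiRNN() above (all sizes are made up).
n_input, n_steps, n_hidden, n_classes = 4, 101, 128, 2
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
outputs = BiRNN(x, n_input, n_steps, n_hidden)   # list of n_steps tensors
# Each element of `outputs` has shape (batch_size, 2 * n_hidden), since the
# forward and backward cell outputs are concatenated.
W = tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))
b = tf.Variable(tf.random_normal([n_classes]))
logits = tf.matmul(outputs[-1], W) + b           # classify from the last step
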
Project: TensorFlow-DNNs-for-Predicting-DNA-Transcription-Factor-Binding    Author: adwiens
def BiRNN(x, n_hidden):
    # NOTE: n_input and n_steps are not parameters of this variant; the
    # original project defines them as module-level constants.
    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)
    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype=tf.float32)
    return outputs
Project: tf_han    Author: AlbertXiebnu
def sentence_embedding(self, inputs, keep_prob, w):
        with tf.device('/cpu:0'):
            embedding_layer = tf.nn.embedding_lookup(w['word_embedding_w'],inputs)
        # batch_size x max_len x word_embedding
        cell_input = tf.transpose(embedding_layer,[1,0,2])
        cell_input = tf.reshape(cell_input,[-1,self.hiddensize])
        cell_input = tf.split(0,self.max_len,cell_input)
        with tf.variable_scope('forward'):
            lstm_fw_cell = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(self.rnnsize,forget_bias=1.0,state_is_tuple=True),input_keep_prob=keep_prob,output_keep_prob=keep_prob)
        with tf.variable_scope('backward'):
            lstm_bw_cell = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(self.rnnsize,forget_bias=1.0,state_is_tuple=True),input_keep_prob=keep_prob,output_keep_prob=keep_prob)
        outputs,_,_ = rnn.bidirectional_rnn(lstm_fw_cell,lstm_bw_cell,cell_input,dtype=tf.float32)
        # outputs shape: seq_len x [batch_size x (fw_cell_size + bw_cell_size)]
        att = self.attention_layer(outputs,w)
        return att
Project: tf_han    Author: AlbertXiebnu
def build(self, inputs, keep_prob, n_classes, word_embedding):

        inputs = tf.transpose(inputs,[1,0,2])
        inputs = tf.reshape(inputs,[-1,self.max_len])
        inputs = tf.split(0, self.max_sen, inputs)

        variable_dict = {
            "word_embedding_w": tf.get_variable(name="word_embedding",shape=[self.vocabsize,self.hiddensize],initializer=tf.constant_initializer(word_embedding),trainable=True),
            "attention_w" : tf.get_variable(name="word_attention_weights",shape=[2*self.rnnsize,2*self.rnnsize]),
            "attention_b" : tf.get_variable(name="word_attention_bias",shape=[2*self.rnnsize]),
            "attention_c" : tf.get_variable(name="word_attention_context",shape=[2*self.rnnsize,1]),
        }

        sent_embeddings = []
        with tf.variable_scope("embedding_scope") as scope:
            for x in inputs:
                embedding = self.sentence_embedding(x,keep_prob,variable_dict)
                sent_embeddings.append(embedding)
                scope.reuse_variables()

        with tf.variable_scope('forward'):
            lstm_fw_cell = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(self.docsize,forget_bias=1.0,state_is_tuple=True),input_keep_prob=keep_prob,output_keep_prob=keep_prob)
        with tf.variable_scope('backward'):
            lstm_bw_cell = rnn_cell.DropoutWrapper(rnn_cell.BasicLSTMCell(self.docsize,forget_bias=1.0,state_is_tuple=True),input_keep_prob=keep_prob,output_keep_prob=keep_prob)
        outputs, _ , _ = rnn.bidirectional_rnn(lstm_fw_cell,lstm_bw_cell,sent_embeddings,dtype=tf.float32)

        atten_variable_dict = {
            "attention_w" : tf.get_variable(name="sent_attention_weights", shape=[2*self.docsize,2*self.docsize]),
            "attention_b" : tf.get_variable(name="sent_attention_bias", shape=[2*self.docsize]),
            "attention_c" : tf.get_variable(name="sent_attention_context", shape=[2*self.docsize,1]),
        }

        att = self.attention_layer(outputs,atten_variable_dict)
        # full connected layer
        W = tf.get_variable("fullconnect_weights",shape=[2 * self.docsize,n_classes])
        B = tf.get_variable("fullconnect_bias",shape=[n_classes])
        output = tf.add(tf.matmul(att,W),B,name="output")
        return output
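
The attention_layer() called in both methods is not part of the extracted snippets. A sketch consistent with the shapes of attention_w, attention_b, and attention_c (standard HAN-style additive attention over the time axis, written here as an assumption rather than the project's actual code) could look like:

    def attention_layer(self, outputs, w):
        # outputs: list of seq_len tensors, each of shape (batch_size, 2 * rnn_size).
        # Stack to (batch_size, seq_len, 2 * rnn_size); tf.pack is the TF 0.x name.
        h = tf.transpose(tf.pack(outputs), [1, 0, 2])
        seq_len = len(outputs)
        dim = int(h.get_shape()[2])
        flat = tf.reshape(h, [-1, dim])
        # u = tanh(W * h + b), scored against the context vector c.
        u = tf.tanh(tf.matmul(flat, w['attention_w']) + w['attention_b'])
        scores = tf.reshape(tf.matmul(u, w['attention_c']), [-1, seq_len])
        alpha = tf.nn.softmax(scores)
        # Attention-weighted sum over time -> (batch_size, 2 * rnn_size).
        return tf.reduce_sum(h * tf.expand_dims(alpha, 2), 1)
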
Project: RecursiveNN    Author: sapruash
def compute_states(self,emb):
        def unpack_sequence(tensor):
            return tf.unpack(tf.transpose(tensor, perm=[1, 0, 2]))

        with tf.variable_scope("Composition",initializer=
                               tf.contrib.layers.xavier_initializer(),regularizer=
                               tf.contrib.layers.l2_regularizer(self.reg)):
            cell_fw = rnn_cell.LSTMCell(self.hidden_dim)
            cell_bw = rnn_cell.LSTMCell(self.hidden_dim)
            #tf.cond(tf.less(self.dropout
            #if tf.less(self.dropout, tf.constant(1.0)):
            cell_fw = rnn_cell.DropoutWrapper(cell_fw, input_keep_prob=self.dropout,
                                              output_keep_prob=self.dropout)
            cell_bw = rnn_cell.DropoutWrapper(cell_bw, input_keep_prob=self.dropout,
                                              output_keep_prob=self.dropout)

            #output, state = rnn.dynamic_rnn(cell,emb,sequence_length=self.lngths,dtype=tf.float32)
            outputs, _, _ = rnn.bidirectional_rnn(cell_fw, cell_bw, unpack_sequence(emb),
                                                  sequence_length=self.lngths, dtype=tf.float32)
            #output = pack_sequence(outputs)
        sum_out = tf.reduce_sum(tf.pack(outputs), [0])
        sent_rep = tf.div(sum_out, tf.expand_dims(tf.to_float(self.lngths), 1))

        final_state = sent_rep
        return final_state
Project: Variational-Recurrent-Autoencoder-Tensorflow    Author: Chung-I
def embedding_encoder(encoder_inputs,
                      cell,
                      embedding,
                      num_symbols,
                      embedding_size,
                      bidirectional=False,
                      dtype=None,
                      weight_initializer=None,
                      scope=None):

  with variable_scope.variable_scope(
      scope or "embedding_encoder", dtype=dtype) as scope:
    dtype = scope.dtype
    # Encoder.
    if not embedding:
      embedding = variable_scope.get_variable("embedding", [num_symbols, embedding_size],
              initializer=weight_initializer())
    emb_inp = [embedding_ops.embedding_lookup(embedding, i) for i in encoder_inputs]
    if bidirectional:
      _, output_state_fw, output_state_bw = rnn.bidirectional_rnn(cell, cell, emb_inp,
              dtype=dtype)
      encoder_state = tf.concat(1, [output_state_fw, output_state_bw])
    else:
      _, encoder_state = rnn.rnn(
        cell, emb_inp, dtype=dtype)

    return encoder_state
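
A hypothetical call to the encoder above, again with the old TF 0.x API; the cell type, vocabulary size, sequence length, and initializer are made-up values for illustration only:

# Hypothetical usage of embedding_encoder() (sizes and cell are assumptions).
cell = tf.nn.rnn_cell.GRUCell(128)
encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(20)]  # 20 time steps
state = embedding_encoder(encoder_inputs, cell, embedding=None,
                          num_symbols=10000, embedding_size=128,
                          bidirectional=True, dtype=tf.float32,
                          weight_initializer=lambda: tf.random_uniform_initializer(-0.1, 0.1))
# With bidirectional=True the returned state is the forward and backward
# final states concatenated along dimension 1.
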
Project: returnn-benchmarks    Author: rwth-i6
def BiRNN(x, weights, biases):
    # NOTE: n_input, n_steps, n_hidden, MAX_LEN, and BATCH_SIZE are not
    # parameters; the original script defines them at module level.

    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(0, n_steps, x)

    # Define lstm cells with tensorflow
    with tf.variable_scope("lstm1") as scope1:
        lstm_fw_cell_1 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        lstm_bw_cell_1 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        outputs_1, _, _ = rnn.bidirectional_rnn(lstm_fw_cell_1, lstm_bw_cell_1, x, dtype=tf.float32)

    with tf.variable_scope("lstm2") as scope2:
        lstm_fw_cell_2 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        lstm_bw_cell_2 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        outputs_2, _, _ = rnn.bidirectional_rnn(lstm_fw_cell_2, lstm_bw_cell_2, outputs_1, dtype=tf.float32)

    with tf.variable_scope("lstm3") as scope3:
        lstm_fw_cell_3 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        lstm_bw_cell_3 = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
        outputs_3, _, _ = rnn.bidirectional_rnn(lstm_fw_cell_3, lstm_bw_cell_3, outputs_2, dtype=tf.float32)

    outputs = outputs_3
    outputs = tf.reshape(tf.concat(0, outputs), [MAX_LEN*BATCH_SIZE,n_hidden*2])
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs, weights['out']) + biases['out']
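
The module-level names assumed by this stacked BiRNN could be set up roughly as follows; the concrete values are illustrative assumptions, not the original benchmark's configuration:

# Hypothetical globals for the stacked BiRNN() above (TF 0.x API, values made up).
n_input, n_steps, n_hidden = 39, 100, 256
MAX_LEN, BATCH_SIZE, n_classes = 100, 16, 61   # MAX_LEN must equal n_steps
weights = {'out': tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))}
biases  = {'out': tf.Variable(tf.random_normal([n_classes]))}
x = tf.placeholder(tf.float32, [BATCH_SIZE, n_steps, n_input])
logits = BiRNN(x, weights, biases)             # shape: (MAX_LEN*BATCH_SIZE, n_classes)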