Python tensorflow.contrib.rnn module: static_bidirectional_rnn() example source code

The following 13 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.rnn.static_bidirectional_rnn().
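As a quick orientation before the project excerpts, here is a minimal sketch of the call itself, assuming TensorFlow 1.x with tf.contrib available; the tensor sizes are illustrative and not taken from any of the projects below. The function takes a forward cell, a backward cell, and a Python list of per-time-step tensors, and returns the per-step outputs (forward and backward concatenated) together with both final states.

import tensorflow as tf
from tensorflow.contrib import rnn

batch_size, n_steps, n_input, n_hidden = 32, 10, 28, 64   # illustrative sizes

x = tf.placeholder(tf.float32, [batch_size, n_steps, n_input])
inputs = tf.unstack(x, n_steps, 1)   # list of n_steps tensors, each (batch_size, n_input)

fw_cell = rnn.BasicLSTMCell(n_hidden)
bw_cell = rnn.BasicLSTMCell(n_hidden)

# outputs: list of n_steps tensors, each (batch_size, 2 * n_hidden)
# state_fw / state_bw: final states of the forward / backward cells
outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
    fw_cell, bw_cell, inputs, dtype=tf.float32)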

Project: tensorflow-examples    Author: floydhub
def BiRNN(x, weights, biases):

    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
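The function above relies on module-level n_steps, n_input, n_hidden, and the weights/biases dictionaries. A hedged sketch of how such a graph could be wired up and trained follows; the sizes, the softmax cross-entropy loss, and the Adam optimizer are illustrative assumptions, not taken from the project. Note that the output weight matrix has 2 * n_hidden rows because forward and backward outputs are concatenated.

# Hypothetical wiring for the BiRNN function above (sizes chosen for illustration).
n_input, n_steps, n_hidden, n_classes = 28, 28, 128, 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

weights = {'out': tf.Variable(tf.random_normal([2 * n_hidden, n_classes]))}
biases = {'out': tf.Variable(tf.random_normal([n_classes]))}

logits = BiRNN(x, weights, biases)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)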
Project: deeplearning    Author: fanfanfeng
def inference(self, X, reuse=None, trainMode=True):
        word_vectors = tf.nn.embedding_lookup(self.words, X)
        length = self.length(word_vectors)
        length_64 = tf.cast(length, tf.int64)
        if trainMode:
            word_vectors = tf.nn.dropout(word_vectors, 0.5)

        with tf.variable_scope('rnn_fwbw', reuse=reuse) as scope:
            lstm_fw = rnn.LSTMCell(self.numHidden)
            lstm_bw = rnn.LSTMCell(self.numHidden)

            inputs = tf.unstack(word_vectors, nlp_segment.flags.max_sentence_len, 1)
            output, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, sequence_length=length_64, dtype=tf.float32)
            output = tf.reshape(output, [-1, self.numHidden * 2])

        matricized_unary_scores = tf.matmul(output, self.W) + self.b
        unary_scores = tf.reshape(matricized_unary_scores,
                                  [-1, nlp_segment.flags.max_sentence_len, self.distinctTagNum])
        return unary_scores, length
Project: deeplearning    Author: fanfanfeng
def inference(self, X, reuse=None, trainMode=True):
        word_vectors = tf.nn.embedding_lookup(self.words, X)
        length = self.length(word_vectors)
        length_64 = tf.cast(length, tf.int64)
        if trainMode:
            word_vectors = tf.nn.dropout(word_vectors, 0.5)

        with tf.variable_scope('rnn_fwbw', reuse=reuse) as scope:
            lstm_fw = rnn.LSTMCell(self.numHidden)
            lstm_bw = rnn.LSTMCell(self.numHidden)

            inputs = tf.unstack(word_vectors, self.sentence_length, 1)
            output, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, sequence_length=length_64, dtype=tf.float32)
            output = tf.reshape(output, [-1, self.numHidden * 2])

        matricized_unary_scores = tf.matmul(output, self.W) + self.b
        unary_scores = tf.reshape(matricized_unary_scores,
                                  [-1, self.sentence_length, self.distinctTagNum])
        return unary_scores, length
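Both inference variants call self.length(word_vectors) to obtain the per-sentence lengths passed as sequence_length. That helper is not shown in these excerpts; the sketch below is an assumption about how it might work, counting time steps whose embedding row is non-zero in a padded [batch, max_len, embed_dim] tensor.

# Hypothetical length() helper: counts non-padding time steps,
# assuming padded positions look up an all-zero embedding row.
def length(self, word_vectors):
    used = tf.sign(tf.reduce_max(tf.abs(word_vectors), axis=2))   # [batch, max_len], 1 where non-padding
    return tf.cast(tf.reduce_sum(used, axis=1), tf.int32)         # [batch], number of real tokens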
Project: Deep-Learning-with-TensorFlow    Author: PacktPublishing
def BiRNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: Deep-Learning-with-TensorFlow    Author: PacktPublishing
def BiRNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
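The transpose/reshape/split sequence in the two examples above produces the same list of per-time-step tensors as the single tf.unstack call used in other snippets. A small illustrative comparison, assuming x, n_steps and n_input as in the snippet:

# Two equivalent ways to turn a (batch_size, n_steps, n_input) tensor into
# a list of n_steps tensors of shape (batch_size, n_input).

# 1. transpose -> reshape -> split (as above)
x1 = tf.transpose(x, [1, 0, 2])             # (n_steps, batch_size, n_input)
x1 = tf.reshape(x1, [-1, n_input])          # (n_steps * batch_size, n_input)
x1 = tf.split(axis=0, num_or_size_splits=n_steps, value=x1)

# 2. a single unstack along the time axis
x2 = tf.unstack(x, n_steps, axis=1)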
Project: EAS    Author: han-cai
def build(self):
        self._define_input()

        output = self.input_seq
        output = embedding(output, self.vocab.size, self.embedding_dim, name='layer_embedding')
        input_dim = self.embedding_dim

        # Prepare data shape to match rnn function requirements
        # Current data input shape: [batch_size, num_steps, input_dim]
        # Required shape: 'num_steps' tensors list of shape [batch_size, input_dim]
        output = tf.transpose(output, [1, 0, 2])
        output = tf.reshape(output, [-1, input_dim])
        output = tf.split(output, self.num_steps, 0)

        if self.bidirectional:
            # 'num_steps' tensors list of shape [batch_size, rnn_units * 2]
            fw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            bw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            output, state_fw, state_bw = rnn.static_bidirectional_rnn(
                fw_cell, bw_cell, output, dtype=tf.float32, sequence_length=self.seq_len, scope='encoder')

            if isinstance(state_fw, tf.contrib.rnn.LSTMStateTuple):
                encoder_state_c = tf.concat([state_fw.c, state_bw.c], axis=1, name='bidirectional_concat_c')
                encoder_state_h = tf.concat([state_fw.h, state_bw.h], axis=1, name='bidirectional_concat_h')
                state = tf.contrib.rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
            elif isinstance(state_fw, tf.Tensor):
                state = tf.concat([state_fw, state_bw], axis=1, name='bidirectional_concat')
            else:
                raise ValueError
        else:
            # 'num_steps' tensors list of shape [batch_size, rnn_units]
            cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            output, state = rnn.static_rnn(cell, output, dtype=tf.float32, sequence_length=self.seq_len,
                                           scope='encoder')

        output = tf.stack(output, axis=0)  # [num_steps, batch_size, rnn_units]
        output = tf.transpose(output, [1, 0, 2])  # [batch_size, num_steps, rnn_units]
        self.encoder_output = output
        self.encoder_state = state
        return output, state
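build_cell is a project helper whose body is not included in this excerpt. The sketch below is only an assumption consistent with its call signature here: it builds one cell of the requested type and stacks layers with MultiRNNCell. Note that the isinstance checks above only cover single-cell states (an LSTMStateTuple or a plain tensor), so a multi-layer stack would need additional handling in the bidirectional branch.

# Hypothetical build_cell(rnn_units, cell_type, rnn_layers) consistent with the calls above.
def build_cell(units, cell_type, layers):
    def single_cell():
        if cell_type == 'lstm':
            return tf.contrib.rnn.LSTMCell(units)
        elif cell_type == 'gru':
            return tf.contrib.rnn.GRUCell(units)
        return tf.contrib.rnn.BasicRNNCell(units)
    if layers > 1:
        return tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(layers)])
    return single_cell()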
Project: deeplearning    Author: fanfanfeng
def bi_lstm_layer(self,inputs):
        if self.hidden_layer_num >1:
            lstm_fw = rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.hidden_layer_num)])
            lstm_bw = rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.hidden_layer_num)])
        else:
            lstm_fw = self.lstm_cell()
            lstm_bw = self.lstm_cell()

        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, sequence_length=self.lengths, dtype=tf.float32)
        features = tf.reshape(outputs, [-1, self.num_hidden * 2])
        return features
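self.lstm_cell() is defined elsewhere in the project; a plausible sketch (an assumption, including the hypothetical self.dropout_keep_prob attribute) wraps each LSTM cell with dropout:

# Hypothetical lstm_cell() helper consistent with the layer above; the project may define it differently.
def lstm_cell(self):
    cell = rnn.LSTMCell(self.num_hidden)
    return rnn.DropoutWrapper(cell, output_keep_prob=self.dropout_keep_prob)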
Project: deeplearning    Author: fanfanfeng
def __init__(self):
        self.embeddingSize = nlp_segment.flags.embedding_size
        self.num_tags = nlp_segment.flags.num_tags
        self.num_hidden = nlp_segment.flags.num_hidden
        self.learning_rate = nlp_segment.flags.learning_rate
        self.batch_size = nlp_segment.flags.batch_size
        self.model_save_path = nlp_segment.model_save_path

        self.input = tf.placeholder(tf.int32,
                                  shape=[None, FLAGS.max_sentence_len],
                                  name="input_placeholder")

        self.label = tf.placeholder(tf.int32,
                                    shape=[None, FLAGS.max_sentence_len],
                                    name="label_placeholder")
        self.dropout = tf.placeholder(tf.float32,name="dropout")

        with tf.name_scope("embedding_layer"):
            self.word_embedding = tf.Variable(data_loader.load_w2v(nlp_segment.word_vec_path), name="word_embedding")
            inputs_embed = tf.nn.embedding_lookup(self.word_embedding,self.input)
            length = self.length(self.input)
            self.length_64 = tf.cast(length, tf.int64)
            reuse = None #if self.trainMode else True


            # if trainMode:
            #  word_vectors = tf.nn.dropout(word_vectors, 0.5)
            with tf.name_scope("rnn_fwbw") as scope:
                lstm_fw = rnn.LSTMCell(self.num_hidden,use_peepholes=True)
                lstm_bw = rnn.LSTMCell(self.num_hidden,use_peepholes=True)

                inputs = tf.unstack(inputs_embed, nlp_segment.flags.max_sentence_len, 1)
                outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, sequence_length=self.length_64,
                                                            dtype=tf.float32)
            output = tf.reshape(outputs, [-1, self.num_hidden * 2])
            #if self.trainMode:
            output = tf.nn.dropout(output, self.dropout)

        with tf.variable_scope('Softmax') as scope:
            self.W = tf.get_variable(shape=[self.num_hidden * 2, self.num_tags],
                                     initializer=tf.truncated_normal_initializer(stddev=0.01),
                                     name='weights',
                                     regularizer=l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([self.num_tags]), name='bias')
            matricized_unary_scores = tf.matmul(output, self.W) + self.b
            # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
            self.unary_scores = tf.reshape(
                matricized_unary_scores,
                [-1, FLAGS.max_sentence_len, self.num_tags])
        with tf.name_scope("crf"):
            self.transition_params = tf.get_variable(
                "transitions",
                shape=[self.num_tags, self.num_tags],
                initializer=self.initializer)
            log_likelihood, self.transition_params = crf.crf_log_likelihood(self.unary_scores, self.label, self.length_64,self.transition_params)
        self.loss = tf.reduce_mean(-log_likelihood)
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver()
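The class above only builds the CRF training loss. At inference time the unary scores and the learned transition matrix would typically be decoded with the Viterbi decoder from tf.contrib.crf; the following is a hedged sketch using an assumed model instance, not code from the project.

# Hypothetical decoding step with the tensors built above (the `model` instance name is assumed).
viterbi_seq, viterbi_score = crf.crf_decode(
    model.unary_scores,                      # [batch, max_sentence_len, num_tags]
    model.transition_params,                 # [num_tags, num_tags]
    tf.cast(model.length_64, tf.int32))      # per-sentence lengths
# viterbi_seq: [batch, max_sentence_len] int32 tensor of best-path tag ids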
Project: deeplearning    Author: fanfanfeng
def bi_lstm_layer(self,inputs):
        if self.hidden_layer_num >1:
            lstm_fw = rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.hidden_layer_num)])
            lstm_bw = rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.hidden_layer_num)])
        else:
            lstm_fw = self.lstm_cell()
            lstm_bw = self.lstm_cell()

        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, dtype=tf.float32)
        features = tf.reshape(outputs, [-1, self.hidden_neural_size * 2])
        return features
Project: deeplearning    Author: fanfanfeng
def bilstm_layer(self,inputs):

        if self.hidden_layer_num >1:
            lstm_fw = rnn.MultiRNNCell([self.lstm_fw() for _ in range(self.hidden_layer_num)])
            lstm_bw = rnn.MultiRNNCell([self.lstm_bw() for _ in range(self.hidden_layer_num)])
        else:
            lstm_fw = self.lstm_fw()
            lstm_bw = self.lstm_bw()


        # outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw, cell_bw=lstm_bw, inputs=inputs, dtype=tf.float32)
        outputs,_,_ = rnn.static_bidirectional_rnn(lstm_fw,lstm_bw,inputs,dtype=tf.float32)
        #outputs = tf.concat(outputs, 2)
        output = outputs[-1]
        return output
Project: ML    Author: JNU-Room
def BiRNN(x, weights, biases):

    # Prepare data shape to match `bidirectional_rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshape to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    try:
        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                              dtype=tf.float32)
    except Exception: # Old TensorFlow version only returns outputs not states
        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                        dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: tensorflow_video_classification_LSTM    Author: frankgu
def _init_model(self):
    # Create multiple forward lstm cell
    cell_fw = rnn.MultiRNNCell(
      [rnn.BasicLSTMCell(self._config['hidden_size']) 
        for _ in range(self._config['num_layers'])]) 

    # Create multiple backward lstm cell
    cell_bw = rnn.MultiRNNCell(
      [rnn.BasicLSTMCell(self._config['hidden_size']) 
        for _ in range(self._config['num_layers'])]) 

    inputs = self._input.input_data

    # Add dropout layer to the input data 
    if self._is_training and self._config['keep_prob'] < 1:
      inputs = [tf.nn.dropout(single_input, self._config['keep_prob'])
                for single_input in inputs]

    self._outputs, _, _ = rnn.static_bidirectional_rnn(
                            cell_fw, cell_bw, inputs, dtype=tf.float32)

    # Hidden layer weights => 2*hidden_size because of forward + backward cells
    softmax_w = tf.get_variable("softmax_w",
      [2*self._config['hidden_size'], self._config['num_classes']])
    softmax_b = tf.get_variable("softmax_b", [self._config['num_classes']])

    # Linear activation, using rnn inner loop last output
    #   logit shape: [batch_size, num_classes]
    self._logits = tf.matmul(self._outputs[-1], softmax_w) + softmax_b

    # Define loss
    # Required targets shape: [batch_size, num_classes] (one hot vector)
    self._cost = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits=self._logits, 
                                              labels=self._input.targets))
    # Evaluate model
    self._correct_pred = tf.equal(tf.argmax(self._logits, 1), 
                                  tf.argmax(self._input.targets, 1))
    self.accuracy = tf.reduce_mean(tf.cast(self._correct_pred, tf.float32))

    # Define optimizer
    self._lr = tf.Variable(0.0, trainable=False)
    self._train_op = tf.train.AdamOptimizer(
      learning_rate=self._lr).minimize(self._cost)

    self._new_lr = tf.placeholder(
      tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)
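The learning-rate assign op defined at the end is presumably driven from the training loop. A hedged usage sketch (the assign_lr method name and the decay schedule are assumptions):

  def assign_lr(self, session, lr_value):
    # Feed the new learning rate into the assign op built in _init_model.
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

# e.g. in the training loop, decay the rate once per epoch:
#   model.assign_lr(sess, base_lr * (lr_decay ** epoch))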
Project: rnn-nlu    Author: HadoopIt
def generate_rnn_output(self):
    """
    Generate RNN state outputs with word embeddings as inputs
    """
    with tf.variable_scope("generate_seq_output"):
      if self.bidirectional_rnn:
        embedding = tf.get_variable("embedding",
                                    [self.source_vocab_size,
                                     self.word_embedding_size])
        encoder_emb_inputs = list()
        encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                                for encoder_input in self.encoder_inputs]
        rnn_outputs = static_bidirectional_rnn(self.cell_fw,
                                               self.cell_bw, 
                                               encoder_emb_inputs, 
                                               sequence_length=self.sequence_length,
                                               dtype=tf.float32)
        encoder_outputs, encoder_state_fw, encoder_state_bw = rnn_outputs
        # with state_is_tuple = True, if num_layers > 1, 
        # here we simply use the state from last layer as the encoder state
        state_fw = encoder_state_fw[-1]
        state_bw = encoder_state_bw[-1]
        encoder_state = tf.concat([tf.concat(state_fw, 1),
                                   tf.concat(state_bw, 1)], 1)
        top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size \
                                  + self.cell_bw.output_size])
                      for e in encoder_outputs]
        attention_states = tf.concat(top_states, 1)
      else:
        embedding = tf.get_variable("embedding", 
                                    [self.source_vocab_size,
                                     self.word_embedding_size])
        encoder_emb_inputs = list()
        encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                              for encoder_input in self.encoder_inputs] 
        rnn_outputs = static_rnn(self.cell_fw,
                                 encoder_emb_inputs,
                                 sequence_length=self.sequence_length,
                                 dtype=tf.float32)
        encoder_outputs, encoder_state = rnn_outputs
        # with state_is_tuple = True, if num_layers > 1, 
        # here we use the state from last layer as the encoder state
        state = encoder_state[-1]
        encoder_state = tf.concat(state, 1)
        top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size])
                      for e in encoder_outputs]
        attention_states = tf.concat(top_states, 1)
      return encoder_outputs, encoder_state, attention_states
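self.cell_fw and self.cell_bw are constructed elsewhere in the model. The comment about state_is_tuple = True and indexing the last layer suggests multi-layer cells, so a plausible construction (an assumption, not the project's actual code) is:

# Hypothetical encoder cell construction consistent with the comments above.
def build_encoder_cells(hidden_size, num_layers):
    cell_fw = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)
         for _ in range(num_layers)], state_is_tuple=True)
    cell_bw = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)
         for _ in range(num_layers)], state_is_tuple=True)
    return cell_fw, cell_bw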