Python tensorflow.contrib.rnn module: static_rnn() example source code

We extracted the following 26 code examples from open-source Python projects to illustrate how to use tensorflow.contrib.rnn.static_rnn().
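
Before the project examples, here is a minimal, self-contained sketch of the typical static_rnn() call pattern. It assumes TensorFlow 1.x with the contrib module available; the dimensions and variable names are illustrative only, not taken from any of the projects below.

import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden, n_classes = 28, 28, 128, 10

# Input arrives as [batch_size, n_steps, n_input]; static_rnn expects a
# Python list of n_steps tensors, each of shape [batch_size, n_input].
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
inputs = tf.unstack(x, n_steps, 1)

cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = rnn.static_rnn(cell, inputs, dtype=tf.float32)

# Most of the examples below keep only the last output for classification.
W = tf.Variable(tf.random_normal([n_hidden, n_classes]))
b = tf.Variable(tf.random_normal([n_classes]))
logits = tf.matmul(outputs[-1], W) + b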

Project: identifiera-sarkasm    Author: risnejunior    | Project source | File source
def feed_network(self,data,keep_prob,chunk_size,n_chunks,dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._gru_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data,(axis))
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)
        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]
        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
Project: identifiera-sarkasm    Author: risnejunior    | Project source | File source
def feed_network(self,data,keep_prob,chunk_size,n_chunks, dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data,(axis))
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)

        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]

        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
Project: Deep-Learning-Experiments    Author: roatienza    | Project source | File source
def RNN(x, weights, biases):

    # reshape to [-1, n_input]
    x = tf.reshape(x, [-1, n_input])

    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x,n_input,1)

    # 2-layer LSTM, each layer has n_hidden units.
    # Average Accuracy= 95.20% at 50k iter
    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden),rnn.BasicLSTMCell(n_hidden)])

    # 1-layer LSTM with n_hidden units but with lower accuracy.
    # Average Accuracy= 90.60% 50k iter
    # Uncomment line below to test but comment out the 2-layer rnn.MultiRNNCell above
    # rnn_cell = rnn.BasicLSTMCell(n_hidden)

    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # there are n_input outputs but
    # we only want the last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
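A hedged usage sketch for the word-prediction function above, continuing the snippet: the values of n_input, n_hidden, and vocab_size are assumptions standing in for the rest of the script, which is not shown here.

n_input = 3        # words fed in per prediction step (assumed)
n_hidden = 512     # LSTM units per layer (assumed)
vocab_size = 112   # size of the word dictionary (assumed)

# Each training sample is a window of n_input word ids, e.g. [20, 6, 33],
# and the target is a one-hot vector over the vocabulary.
x = tf.placeholder(tf.float32, [None, n_input, 1])
y = tf.placeholder(tf.float32, [None, vocab_size])

weights = {'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))}
biases = {'out': tf.Variable(tf.random_normal([vocab_size]))}

pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))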
Project: Stacked_LSTMS_Highway_Residual_On_TimeSeries_Datasets    Author: praveendareddy21    | Project source | File source
def model(X, W, B, lstm_size):
    # X, input shape: (batch_size, time_step_size, input_vec_size)
    XT = tf.transpose(X, [1, 0, 2])  # permute time_step_size and batch_size
    # XT shape: (time_step_size, batch_size, input_vec_size)
    XR = tf.reshape(XT, [-1, lstm_size]) # each row has input for each lstm cell (lstm_size=input_vec_size)
    # XR shape: (time_step_size * batch_size, input_vec_size)
    X_split = tf.split(XR, time_step_size, 0) # split them to time_step_size (28 arrays)
    # Each array shape: (batch_size, input_vec_size)

    # Make lstm with lstm_size (each input vector size)
    lstm = rnn.BasicLSTMCell(lstm_size, forget_bias=1.0, state_is_tuple=True)

    # Get lstm cell output, time_step_size (28) arrays with lstm_size output: (batch_size, lstm_size)
    outputs, _states = rnn.static_rnn(lstm, X_split, dtype=tf.float32)

    # Linear activation
    # Get the last output
    return tf.matmul(outputs[-1], W) + B, lstm.state_size # State size to initialize the state



############################## model definition end ######################################
Project: tensorflow-examples    Author: floydhub    | Project source | File source
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: DeepTrade    Author: happynoom    | Project source | File source
def _create_loss(self):
        '''
        Risk estimation loss function. The output is the planned position we should hold for the next day. The
        next day's change rate is self.y, so we lose money in two ways: -self.y * self.position is the trading
        loss, and cost * self.position is a constant loss from tax and from missed profit (e.g. of buying national
        debt). The loss function is therefore formulated as:
        100 * (-self.y * self.position + cost * self.position) = -100 * ((self.y - cost) * self.position)
        :return:
        '''
        #with tf.device("/cpu:0"):
        xx = tf.unstack(self.x, self.step, 1)
        lstm_cell = rnn.LSTMCell(self.hidden_size, forget_bias=1.0, initializer=orthogonal_initializer())
        dropout_cell = DropoutWrapper(lstm_cell, input_keep_prob=self.keep_rate, output_keep_prob=self.keep_rate, state_keep_prob=self.keep_rate)
        outputs, states = rnn.static_rnn(dropout_cell, xx, dtype=tf.float32)
        signal = tf.matmul(outputs[-1], self.weights['out']) + self.biases['out']
        scope = "activation_batch_norm"
        norm_signal = self.batch_norm_layer(signal, scope=scope)
        # batch_norm(signal, 0.9, center=True, scale=True, epsilon=0.001, activation_fn=tf.nn.relu6,
        #           is_training=is_training, scope="activation_batch_norm", reuse=False)
        self.position = tf.nn.relu6(norm_signal, name="relu_limit") / 6.
        self.avg_position = tf.reduce_mean(self.position)
        # self.cost = 0.0002
        self.loss = -100. * tf.reduce_mean(tf.multiply((self.y - self.cost), self.position, name="estimated_risk"))
Project: TensorFlow-Bitcoin-Robot    Author: TensorFlowNews    | Project source | File source
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, n_steps, 1)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: ML    Author: JNU-Room    | Project source | File source
def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
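The transpose/reshape/split preparation above and the tf.unstack(x, n_steps, 1) call used in other examples produce the same list of n_steps tensors of shape (batch_size, n_input). A quick NumPy check with toy dimensions (values assumed):

import numpy as np

batch_size, n_steps, n_input = 4, 5, 3
x = np.random.rand(batch_size, n_steps, n_input).astype(np.float32)

# approach 1: unstack along the time axis
unstacked = [x[:, t, :] for t in range(n_steps)]

# approach 2: permute, flatten, then split along axis 0
xt = x.transpose(1, 0, 2).reshape(-1, n_input)
split = np.split(xt, n_steps, axis=0)

assert all(np.allclose(a, b) for a, b in zip(unstacked, split))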
Project: Relation-Network    Author: juung    | Project source | File source
def questionLSTM(self, q, q_real_len, reuse = False, scope= "questionLSTM"):
        """
        Args
            q: zero-padded questions, shape=[batch_size, q_max_len]
            q_real_len: original question length, shape = [batch_size, 1]

        Returns
            embedded_q: embedded questions, shape = [batch_size, q_hidden(32)]
        """
        embedded_q_word = tf.nn.embedding_lookup(self.q_word_embed_matrix, q)
        q_input = tf.unstack(embedded_q_word, num = self.q_max_len, axis=1)
        lstm_cell = rnn.BasicLSTMCell(self.q_hidden, reuse = reuse)
        outputs, _ = rnn.static_rnn(lstm_cell, q_input, dtype = tf.float32, scope = scope)

        outputs = tf.stack(outputs)
        outputs = tf.transpose(outputs, [1,0,2])
        index = tf.range(0, self.batch_size) * (self.q_max_len) + (q_real_len - 1)
        outputs = tf.gather(tf.reshape(outputs, [-1, self.s_hidden]), index)
        return outputs
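The index arithmetic above selects, for every question in the batch, the RNN output at its last real (unpadded) timestep from the flattened [batch_size * q_max_len, hidden] tensor. A quick NumPy sketch of the same indexing, with toy sizes assumed:

import numpy as np

batch_size, q_max_len, hidden = 3, 4, 2
outputs = np.arange(batch_size * q_max_len * hidden).reshape(batch_size, q_max_len, hidden)
q_real_len = np.array([2, 4, 1])  # actual question lengths before padding

# row index of the last valid timestep per example in the flattened view
index = np.arange(batch_size) * q_max_len + (q_real_len - 1)
last_outputs = outputs.reshape(-1, hidden)[index]

assert np.array_equal(last_outputs[0], outputs[0, 1])  # example 0 ends at timestep 1
assert np.array_equal(last_outputs[2], outputs[2, 0])  # example 2 ends at timestep 0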
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def rnn_seq2seq(encoder_inputs,
                decoder_inputs,
                encoder_cell,
                decoder_cell=None,
                dtype=dtypes.float32,
                scope=None):
  """RNN Sequence to Sequence model.

  Args:
    encoder_inputs: List of tensors, inputs for encoder.
    decoder_inputs: List of tensors, inputs for decoder.
    encoder_cell: RNN cell to use for encoder.
    decoder_cell: RNN cell to use for decoder, if None encoder_cell is used.
    dtype: Type to initialize encoder state with.
    scope: Scope to use, if None new will be produced.

  Returns:
    List of tensors for outputs and states for training and sampling sub-graphs.
  """
  with vs.variable_scope(scope or "rnn_seq2seq"):
    _, last_enc_state = rnn.static_rnn(
        encoder_cell, encoder_inputs, dtype=dtype)
    return rnn_decoder(decoder_inputs, last_enc_state, decoder_cell or
                       encoder_cell)
Project: identifiera-sarkasm    Author: risnejunior    | Project source | File source
def feed_network(self,data,keep_prob,chunk_size,n_chunks,dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout_1 = tf.nn.dropout(self._layer_weights_1, keep_prob)
        weight_dropout_2 = tf.nn.dropout(self._layer_weights_2, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data,(axis))
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)
        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]
        output1 = tf.add(tf.matmul(output,weight_dropout_1), self._layer_biases_1)
        input2 = tf.nn.relu(output1)
        output2 = tf.add(tf.matmul(output,weight_dropout_2), self._layer_biases_2)
        return output2
Project: Project101    Author: Wonjuseo    | Project source | File source
def RNN(x, weights, biases):

    x = tf.unstack(x, n_steps, 1)
    # Define a lstm cell
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    return tf.matmul(outputs[-1],weights['out'])+biases['out']
Project: Deep-Learning-with-TensorFlow    Author: PacktPublishing    | Project source | File source
def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: Deep-Learning-with-TensorFlow    Author: PacktPublishing    | Project source | File source
def RNN(x, weights, biases):
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(axis=0, num_or_size_splits=n_steps, value=x)
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: LSTM-Time-Series-Analysis-using-Tensorflow    Author: pusj    | Project source | File source
def lstm_model(time_steps, rnn_layers, dense_layers=None, learning_rate=0.01, optimizer='Adagrad',learning_rate_decay_fn = None): # [Ftrl, Adam, Adagrad, Momentum, SGD, RMSProp]
    print(time_steps)
    #exit(0)
    """
        Creates a deep model based on:
            * stacked lstm cells
            * an optional dense layers
        :param num_units: the size of the cells.
        :param rnn_layers: list of int or dict
                             * list of int: the steps used to instantiate the `BasicLSTMCell` cell
                             * list of dict: [{steps: int, keep_prob: int}, ...]
        :param dense_layers: list of nodes for each layer
        :return: the model definition
        """

    def lstm_cells(layers):
        print('lstm_cells layers:', layers)
        if isinstance(layers[0], dict):
            return [rnn.DropoutWrapper(rnn.BasicLSTMCell(layer['num_units'],state_is_tuple=True),layer['keep_prob'])
                    if layer.get('keep_prob')
                    else rnn.BasicLSTMCell(layer['num_units'], state_is_tuple=True)
                    for layer in layers]

        return [rnn.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]

    def dnn_layers(input_layers, layers):
        if layers and isinstance(layers, dict):
            return tflayers.stack(input_layers, tflayers.fully_connected,
                                  layers['layers'],
                                  activation=layers.get('activation'),
                                  dropout=layers.get('dropout'))
        elif layers:
            return tflayers.stack(input_layers, tflayers.fully_connected, layers)
        else:
            return input_layers

    def _lstm_model(X, y):
        stacked_lstm = rnn.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
        x_ =  tf.unstack(X, num=time_steps, axis=1)


        output, layers = rnn.static_rnn(stacked_lstm, x_, dtype=dtypes.float32)
        output = dnn_layers(output[-1], dense_layers)
        prediction, loss = tflearn.models.linear_regression(output, y)
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
            learning_rate = tf.train.exponential_decay(learning_rate, tf.contrib.framework.get_global_step(), decay_steps = 1000, decay_rate = 0.9, staircase=False, name=None))

        print('learning_rate',learning_rate)
        return prediction, loss, train_op

    # https://www.tensorflow.org/versions/r0.10/api_docs/python/train/decaying_the_learning_rate

    return _lstm_model
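A hedged usage sketch of the factory above; the layer sizes and hyperparameters are assumptions, and wiring the returned model function into a tf.contrib.learn-style estimator is left to the surrounding script, which is not shown here.

# rnn_layers can mix plain sizes and dicts with 'num_units' / 'keep_prob'
model_fn = lstm_model(
    time_steps=10,
    rnn_layers=[{'num_units': 64, 'keep_prob': 0.8}, {'num_units': 32}],
    dense_layers=[16],
    learning_rate=0.01,
    optimizer='Adagrad')

# model_fn(X, y) returns (prediction, loss, train_op) for inputs X of
# shape [batch_size, time_steps, input_dim].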
Project: EAS    Author: han-cai    | Project source | File source
def build(self):
        self._define_input()

        output = self.input_seq
        output = embedding(output, self.vocab.size, self.embedding_dim, name='layer_embedding')
        input_dim = self.embedding_dim

        # Prepare data shape to match rnn function requirements
        # Current data input shape: [batch_size, num_steps, input_dim]
        # Required shape: 'num_steps' tensors list of shape [batch_size, input_dim]
        output = tf.transpose(output, [1, 0, 2])
        output = tf.reshape(output, [-1, input_dim])
        output = tf.split(output, self.num_steps, 0)

        if self.bidirectional:
            # 'num_steps' tensors list of shape [batch_size, rnn_units * 2]
            fw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            bw_cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            output, state_fw, state_bw = rnn.static_bidirectional_rnn(
                fw_cell, bw_cell, output, dtype=tf.float32, sequence_length=self.seq_len, scope='encoder')

            if isinstance(state_fw, tf.contrib.rnn.LSTMStateTuple):
                encoder_state_c = tf.concat([state_fw.c, state_bw.c], axis=1, name='bidirectional_concat_c')
                encoder_state_h = tf.concat([state_fw.h, state_bw.h], axis=1, name='bidirectional_concat_h')
                state = tf.contrib.rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
            elif isinstance(state_fw, tf.Tensor):
                state = tf.concat([state_fw, state_bw], axis=1, name='bidirectional_concat')
            else:
                raise ValueError
        else:
            # 'num_steps' tensors list of shape [batch_size, rnn_units]
            cell = build_cell(self.rnn_units, self.cell_type, self.rnn_layers)
            output, state = rnn.static_rnn(cell, output, dtype=tf.float32, sequence_length=self.seq_len,
                                           scope='encoder')

        output = tf.stack(output, axis=0)  # [num_steps, batch_size, rnn_units]
        output = tf.transpose(output, [1, 0, 2])  # [batch_size, num_steps, rnn_units]
        self.encoder_output = output
        self.encoder_state = state
        return output, state
Project: MovieComment2Rating    Author: yaokai1117    | Project source | File source
def __init__(self, sent_length, class_num,
                 embedding_size, initial_embedding_dict,
                 l2_lambda, hidden_size):

        self.input_x = tf.placeholder(tf.int32, [None, sent_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, class_num], name="input_y")
        self.dropout_keep_prob_1 = tf.placeholder(tf.float32, name="dropout_keep_prob_1")
        self.dropout_keep_prob_2 = tf.placeholder(tf.float32, name="dropout_keep_prob_2")

        l2_loss = tf.constant(0.0)

        with tf.name_scope("embedding"):
            self.embedding_dict = tf.Variable(initial_embedding_dict, name="Embedding", dtype=tf.float32)
            self.embedded_chars = tf.nn.embedding_lookup(self.embedding_dict, self.input_x)
            # unstack embedded input
            self.unstacked = tf.unstack(self.embedded_chars, sent_length, 1)

        with tf.name_scope("lstm"):
            # create a LSTM network
            lstm_cell = rnn.BasicLSTMCell(hidden_size)
            self.output, self.states = rnn.static_rnn(lstm_cell, self.unstacked, dtype=tf.float32)
            self.pooling = tf.reduce_mean(self.output, 0)

        with tf.name_scope("linear"):
            weights = tf.get_variable(
                "W",
                shape=[hidden_size, class_num],
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.Variable(tf.constant(0.1, shape=[class_num]), name="b")
            l2_loss += tf.nn.l2_loss(weights)
            l2_loss += tf.nn.l2_loss(bias)
            self.linear_result = tf.nn.xw_plus_b(self.pooling, weights, bias, name="linear")
            self.predictions = tf.arg_max(self.linear_result, 1, name="predictions")

        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.linear_result, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_lambda * l2_loss

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Project: deeplearning    Author: fanfanfeng    | Project source | File source
def RNN(x,weights,biases):
    #x = tf.transpose(x,[1,0,2])  #x = tf.unstack(x,n_steps,1)
   # x = tf.reshape(x, [-1, n_input])
    x = tf.unstack(x, n_steps, 1)
    lstm_cell = rnn.BasicLSTMCell(n_hidden,forget_bias=1.0)
    outputs,states = rnn.static_rnn(lstm_cell,x,dtype=tf.float32)

    return tf.matmul(outputs[-1],weights['out']) + biases['out']
Project: Learning-DeepLearning    Author: zuoxiang95    | Project source | File source
def RNN(inputs, weights, biases):
    # Unstack the batch_size x 28 x 28 input into a list of n_step tensors, each of shape [batch_size, n_input]
    x = tf.unstack(inputs, n_step, 1)

    # Define the LSTM cell
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get the LSTM cell outputs
    outputs, states = rnn.static_rnn(lstm_cell,x, dtype=tf.float32)

    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: visual-question-answering-tensorflow    Author: lmelvix    | Project source | File source
def ques_semantics(word, weight, bias):
    with tf.variable_scope('LSTM') as scope:
        word = tf.unstack(word, 22, 1)
        lstm_cell = rnn.BasicLSTMCell(256, forget_bias=1.0)
        output, states = rnn.static_rnn(lstm_cell, word, dtype=tf.float32)
        ques_sem = tf.matmul(states[-1], weight) + bias
        return tf.nn.relu(ques_sem, "ques-semantics-acitvation")
Project: WaNN    Author: TeoZosa    | Project source | File source
def RNN(layer_in, num_hidden_layers, num_hidden_units, num_inputs_in=155):
    layer_in = tf.reshape(layer_in, [-1, 8 * 8])

    n_features = layer_in.get_shape().as_list()[1]
    num_inputs_in = 155
    num_classes = 155
    # reshape to [-1, n_features]
    X = tf.reshape(layer_in, [-1, n_features])

    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    X = tf.split(X, n_features, 1)

    # Alternative: single-layer LSTM with num_hidden_units units (commented out below).

    # rnn_cell = rnn.BasicLSTMCell(num_hidden)

    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(num_hidden_units)] * num_hidden_layers)


    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, X, dtype=tf.float32)

    # there are n_input outputs but
    # we only want the last output
    weights = {
        'out': tf.Variable(tf.random_normal([num_hidden_units, num_classes]))
    }
    biases = {
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: WaNN    Author: TeoZosa    | Project source | File source
def RNN(X, num_hidden_layers):

    # reshape to [-1, sequence_length * 8 * 8]
    std_dev_He = np.sqrt(2 / np.prod(X.get_shape().as_list()[1:]))
    X = tf.reshape(X, [-1, sequence_length* 8*8])


    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    X = tf.split(X, sequence_length, 1)

    # Alternative: single-layer LSTM with num_hidden units (commented out below).

    # rnn_cell = rnn.BasicLSTMCell(n_hidden)
    with tf.variable_scope('RNN', initializer=tf.random_normal_initializer(mean=0.0, stddev=std_dev_He)):  # alternative: initializer=tf.contrib.layers.xavier_initializer()
        # weights = {
        #     'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
        # }
        # biases = {
        #     'out': tf.Variable(tf.random_normal([num_classes]))
        # }
        weights = tf.get_variable(
            name='weights',
            shape=[num_hidden, num_classes],  # 1 x 64 filter in, 1 class out
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(
            name='biases',
            shape=[num_classes],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        GRU_cell_layer = [rnn.GRUCell(num_hidden)]
        # LSTM_cell_layer = [rnn.BasicLSTMCell(num_hidden, forget_bias=1)]
        rnn_cell = rnn.MultiRNNCell(GRU_cell_layer * num_hidden_layers)
        # generate prediction
        outputs, states = rnn.static_rnn(rnn_cell, X, dtype=tf.float32)

    # there are n_input outputs but
    # we only want the last output
    # return tf.matmul(outputs[-1], weights['out']) + biases['out']
    return tf.matmul(outputs[-1], weights) + biases
Project: WaNN    Author: TeoZosa    | Project source | File source
def RNN(layer_in, num_hidden_layers, num_hidden_units, num_inputs_in=155):
    layer_in = tf.reshape(layer_in, [-1, 8 * 8])

    n_features = layer_in.get_shape().as_list()[1]
    num_inputs_in = 155
    num_classes = 155
    # reshape to [-1, n_features]
    X = tf.reshape(layer_in, [-1, n_features])

    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    X = tf.split(X, n_features, 1)

    # Alternative: single-layer LSTM with num_hidden_units units (commented out below).

    # rnn_cell = rnn.BasicLSTMCell(num_hidden)

    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(num_hidden_units)] * num_hidden_layers)


    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, X, dtype=tf.float32)

    # there are n_input outputs but
    # we only want the last output
    weights = {
        'out': tf.Variable(tf.random_normal([num_hidden_units, num_classes]))
    }
    biases = {
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
Project: identifiera-sarkasm    Author: risnejunior    | Project source | File source
def feed_network(self,data,keep_prob,chunk_size,n_chunks,dynamic):

        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        weight_dropout1 = tf.nn.dropout(self._hidden_layer_1['weights'], keep_prob)
        weight_dropout2 = tf.nn.dropout(self._hidden_layer_2['weights'], keep_prob)
        output_dropout = tf.nn.dropout(self._output['weights'], keep_prob)
        rnn_dropout1 = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell,output_keep_prob=keep_prob)
        rnn_dropout2 = rnn.core_rnn_cell.DropoutWrapper(self._gru_cell,output_keep_prob=keep_prob)
        batch_size = tf.shape(data)[0]

        #begin Calculations
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data,(axis))
        sequence = tf.unstack(data)
        lstm_outputs, states = rnn.static_rnn(rnn_dropout1, sequence, dtype=tf.float32, sequence_length = sequence_lengths)
        if dynamic:
            lstm_outputs = tf.transpose(tf.stack(lstm_outputs), [1, 0, 2])
            lstm_output = net.advanced_indexing_op(lstm_outputs, sequence_lengths)
        else:
            lstm_output = lstm_outputs[-1]

        layer1 = tf.add(tf.matmul(lstm_output,weight_dropout1),self._hidden_layer_1['biases'])
        layer1 = tf.nn.relu(layer1)

        layer2 = tf.add(tf.matmul(layer1,weight_dropout2),self._hidden_layer_2['biases'])
        layer2 = tf.nn.relu(layer2)
        input_to_gru = tf.reshape(layer2,[batch_size,n_chunks,10])
        ndim = input_to_gru.get_shape().as_list()
        chunk_size = ndim[-1]
        input_to_gru = tf.transpose(input_to_gru,[1,0,2])
        input_to_gru = tf.reshape(input_to_gru,[-1,chunk_size])
        input_to_gru = tf.split(input_to_gru,n_chunks,0)

        gru_outputs, state = rnn.static_rnn(rnn_dropout2, input_to_gru, dtype=tf.float32, sequence_length = sequence_lengths)

        if dynamic:
            gru_outputs = tf.transpose(tf.stack(gru_outputs), [1, 0, 2])
            gru_output = net.advanced_indexing_op(gru_outputs, sequence_lengths)
        else:
            gru_output = gru_outputs[-1]

        output = tf.add(tf.matmul(gru_output,output_dropout),self._output['biases'])
        return output
Project: Relation-Network    Author: juung    | Project source | File source
def contextLSTM(self, c, l, c_real_len, reuse = False, scope = "ContextLSTM"):

        def sentenceLSTM(s,
                         s_real_len,
                         reuse = reuse,
                         scope = "sentenceLSTM"):
            """
            Embeds a sentence.

            Arguments
                s: sentence (word index list), shape = [batch_size*20, 12]
                s_real_len: length of the sentence before zero padding, int32

            Returns
                embedded_s: embedded sentence, shape = [batch_size*20, 32]
            """
            embedded_sentence_word = tf.nn.embedding_lookup(self.c_word_embed_matrix, s)
            s_input = tf.unstack(embedded_sentence_word, num = self.s_max_len, axis = 1)
            lstm_cell = rnn.BasicLSTMCell(self.s_hidden, reuse = reuse)
            outputs, _ = rnn.static_rnn(lstm_cell, s_input, dtype = tf.float32, scope = scope)

            outputs = tf.stack(outputs)
            outputs = tf.transpose(outputs, [1,0,2])
            index = tf.range(0, self.batch_size* self.c_max_len) * (self.s_max_len) + (s_real_len - 1)
            outputs = tf.gather(tf.reshape(outputs, [-1, self.s_hidden]), index)
            return outputs

        """
        Args
            c: list of sentences, shape = [batch_size, 20, 12]
            l: list of labels, shape = [batch_size, 20, 20]
            c_real_len: list of real length, shape = [batch_size, 20]

        Returns
            tagged_c_objects: list of embedded sentence + label, each of shape [batch_size, 52]
            len(tagged_c_objects) = 20
        """
        sentences = tf.reshape(c, shape = [-1, self.s_max_len])
        real_lens = tf.reshape(c_real_len, shape= [-1])
        labels = tf.reshape(l, shape = [-1, self.c_max_len])

        s_embedded = sentenceLSTM(sentences, real_lens, reuse = reuse)
        c_embedded = tf.concat([s_embedded, labels], axis=1)
        c_embedded = tf.reshape(c_embedded, shape = [self.batch_size, self.c_max_len, self.c_max_len + self.c_word_embed])
        tagged_c_objects = tf.unstack(c_embedded, axis=1)
        return tagged_c_objects
Project: rnn-nlu    Author: HadoopIt    | Project source | File source
def generate_rnn_output(self):
    """
    Generate RNN state outputs with word embeddings as inputs
    """
    with tf.variable_scope("generate_seq_output"):
      if self.bidirectional_rnn:
        embedding = tf.get_variable("embedding",
                                    [self.source_vocab_size,
                                     self.word_embedding_size])
        encoder_emb_inputs = list()
        encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                                for encoder_input in self.encoder_inputs]
        rnn_outputs = static_bidirectional_rnn(self.cell_fw,
                                               self.cell_bw, 
                                               encoder_emb_inputs, 
                                               sequence_length=self.sequence_length,
                                               dtype=tf.float32)
        encoder_outputs, encoder_state_fw, encoder_state_bw = rnn_outputs
        # with state_is_tuple = True, if num_layers > 1, 
        # here we simply use the state from last layer as the encoder state
        state_fw = encoder_state_fw[-1]
        state_bw = encoder_state_bw[-1]
        encoder_state = tf.concat([tf.concat(state_fw, 1),
                                   tf.concat(state_bw, 1)], 1)
        top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size \
                                  + self.cell_bw.output_size])
                      for e in encoder_outputs]
        attention_states = tf.concat(top_states, 1)
      else:
        embedding = tf.get_variable("embedding", 
                                    [self.source_vocab_size,
                                     self.word_embedding_size])
        encoder_emb_inputs = list()
        encoder_emb_inputs = [tf.nn.embedding_lookup(embedding, encoder_input)\
                              for encoder_input in self.encoder_inputs] 
        rnn_outputs = static_rnn(self.cell_fw,
                                 encoder_emb_inputs,
                                 sequence_length=self.sequence_length,
                                 dtype=tf.float32)
        encoder_outputs, encoder_state = rnn_outputs
        # with state_is_tuple = True, if num_layers > 1, 
        # here we use the state from last layer as the encoder state
        state = encoder_state[-1]
        encoder_state = tf.concat(state, 1)
        top_states = [tf.reshape(e, [-1, 1, self.cell_fw.output_size])
                      for e in encoder_outputs]
        attention_states = tf.concat(top_states, 1)
      return encoder_outputs, encoder_state, attention_states