Python tensorflow.python.ops.array_ops module: split() example source code

The following 48 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.split().
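
As a quick orientation before the examples: in TensorFlow 1.0 and later the signature is tf.split(value, num_or_size_splits, axis), while releases before 1.0 used tf.split(split_dim, num_split, value). Both conventions appear in the snippets below. A minimal sketch of the current form (the shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[4, 6])
# Three equal pieces of shape [4, 2] along axis 1.
a, b, c = tf.split(x, num_or_size_splits=3, axis=1)
# Unequal pieces of widths 1 and 5 along axis 1.
small, large = tf.split(x, num_or_size_splits=[1, 5], axis=1)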

Project: website-fingerprinting    Author: AxelGoetz    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with _checked_scope(self, scope or "gru_cell"):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        value = sigmoid(_linear(
          [inputs, state], 2 * self._num_units, True, 1.0))
        r, u = array_ops.split(
            value=value,
            num_or_size_splits=2,
            axis=1)
      with vs.variable_scope("candidate"):
        res = self._activation(_linear([inputs, r * state],
                                       self._num_units, True))

        if self._batch_norm:
          c = batch_norm(res,
                         center=True, scale=True,
                         is_training=self._is_training,
                         scope='bn1')
        else:
          c = res

      new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: sonnet    Author: deepmind    | project source | file source
def _build(self, inputs, state):
    hidden, cell = state
    input_conv = self._convolutions["input"]
    hidden_conv = self._convolutions["hidden"]
    next_hidden = input_conv(inputs) + hidden_conv(hidden)
    gates = tf.split(value=next_hidden, num_or_size_splits=4,
                     axis=self._conv_ndims+1)

    input_gate, next_input, forget_gate, output_gate = gates
    next_cell = tf.sigmoid(forget_gate + self._forget_bias) * cell
    next_cell += tf.sigmoid(input_gate) * tf.tanh(next_input)
    output = tf.tanh(next_cell) * tf.sigmoid(output_gate)

    if self._skip_connection:
      output = tf.concat([output, inputs], axis=-1)
    return output, (output, next_cell)
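
In this Sonnet cell, axis=self._conv_ndims + 1 is the channel axis of an NHWC-style feature map, so the four gates are split out channel-wise. A toy shape check for the 2-D case (conv_ndims=2; shapes are illustrative):

import tensorflow as tf

next_hidden = tf.zeros([8, 32, 32, 4 * 16])  # batch, height, width, 4 * output_channels
gates = tf.split(next_hidden, num_or_size_splits=4, axis=3)
print([g.shape.as_list() for g in gates])    # four tensors of shape [8, 32, 32, 16]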
Project: merlin    Author: CSTR-Edinburgh    | project source | file source
def __call__(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = rnn_cell_impl._linear([inputs, state], 2 * self._num_units, True,
                                    bias_ones, self._kernel_initializer)
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
      r, u = layer_normalization(r, scope="r/"), layer_normalization(u, scope="u/")
      r, u = math_ops.sigmoid(r), math_ops.sigmoid(u)
    with vs.variable_scope("candidate"):
      c = self._activation(rnn_cell_impl._linear(
          [inputs, r * state], self._num_units, True,
          self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: deep-learning    Author: lbkchen    | project source | file source
def _weighted_gini(self, class_counts):
    """Our split score is the Gini impurity times the number of examples.

    If c(i) denotes the i-th class count and c = sum_i c(i) then
      score = c * (1 - sum_i ( c(i) / c )^2 )
            = c - sum_i c(i)^2 / c
    Args:
      class_counts: A 2-D tensor of per-class counts, usually a slice or
        gather from variables.node_sums.

    Returns:
      A 1-D tensor of the Gini impurities for each row in the input.
    """
    smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
    sums = math_ops.reduce_sum(smoothed, 1)
    sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)

    return sums - sum_squares / sums
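
The algebraic identity in the docstring is easy to check numerically; a small NumPy sketch with made-up counts:

import numpy as np

counts = np.array([2.0, 3.0]) + 1.0  # smoothed class counts c(i)
c = counts.sum()                     # total (smoothed) example count
lhs = c * (1.0 - ((counts / c) ** 2).sum())
rhs = c - (counts ** 2).sum() / c    # the form the TF code computes per row
assert np.isclose(lhs, rhs)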
Project: LIE    Author: EmbraceLife    | project source | file source
def set_value(x, value):
      """Sets the value of a variable, from a Numpy array.

      Arguments:
          x: Tensor to set to a new value.
          value: Value to set the tensor to, as a Numpy array
              (of the same shape).
      """
      value = np.asarray(value)
      tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      get_session().run(assign_op, feed_dict={assign_placeholder: value})
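
A hypothetical usage sketch (the variable and values are invented; get_session() and _convert_string_dtype come from the same Keras backend module as set_value):

import numpy as np
import tensorflow as tf

v = tf.Variable(np.zeros((2, 2), dtype=np.float32))
set_value(v, np.ones((2, 2), dtype=np.float32))  # later calls reuse the cached placeholder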
Project: LIE    Author: EmbraceLife    | project source | file source
def batch_set_value(tuples):
      """Sets the values of many tensor variables at once.

      Arguments:
          tuples: a list of tuples `(tensor, value)`.
              `value` should be a Numpy array.
      """
      if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
          value = np.asarray(value)
          tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
          if hasattr(x, '_assign_placeholder'):
            assign_placeholder = x._assign_placeholder
            assign_op = x._assign_op
          else:
            assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
            assign_op = x.assign(assign_placeholder)
            x._assign_placeholder = assign_placeholder
            x._assign_op = assign_op
          assign_ops.append(assign_op)
          feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
Project: LIE    Author: EmbraceLife    | project source | file source
def texts_to_sequences_generator(self, texts):
            """Transforms each text in texts in a sequence of integers.

            Only top "num_words" most frequent words will be taken into account.
            Only words known by the tokenizer will be taken into account.

            Arguments:
                texts: A list of texts (strings).

            Yields:
                Yields individual sequences.
            """
            num_words = self.num_words
            for text in texts:
              seq = text if self.char_level else text_to_word_sequence(
                  text, self.filters, self.lower, self.split)
              vect = []
              for w in seq:
                i = self.word_index.get(w)
                if i is not None:
                  if num_words and i >= num_words:
                    continue
                  else:
                    vect.append(i)
              yield vect
Project: lsdc    Author: febert    | project source | file source
def _weighted_gini(self, class_counts):
    """Our split score is the Gini impurity times the number of examples.

    If c(i) denotes the i-th class count and c = sum_i c(i) then
      score = c * (1 - sum_i ( c(i) / c )^2 )
            = c - sum_i c(i)^2 / c
    Args:
      class_counts: A 2-D tensor of per-class counts, usually a slice or
        gather from variables.node_sums.

    Returns:
      A 1-D tensor of the Gini impurities for each row in the input.
    """
    smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
    sums = math_ops.reduce_sum(smoothed, 1)
    sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)

    return sums - sum_squares / sums
Project: DL-Benchmarks    Author: DL-Benchmarks    | project source | file source
def __call__(self, inputs, state, mask, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      mask = array_ops.expand_dims(mask, 1)
      new_c = mask * new_c + (1. - mask) * c

      new_h = tanh(new_c) * sigmoid(o)
      new_h = mask * new_h + (1. - mask) * h

    return new_h, array_ops.concat(1, [new_c, new_h])
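
Note that this example (and several others below) uses the pre-1.0 argument order split(split_dim, num_split, value). Assuming a rank-2 state tensor, the two calls are equivalent:

# Legacy (TensorFlow < 1.0):
c, h = array_ops.split(1, 2, state)
# Current (TensorFlow >= 1.0):
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)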
Project: Tensorflow-SegNet    Author: tkuanlun350    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True,
                     initializer=self._initializer)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: R-net    Author: minsangkim142    | project source | file source
def __call__(self, inputs, state, scope=None):
        """Run one step of SRU."""
        with tf.variable_scope(scope or type(self).__name__):  # "SRUCell"
            with tf.variable_scope("x_hat"):
                x = linear([inputs], self._num_units, False)
            with tf.variable_scope("gates"):
                concat = tf.sigmoid(linear([inputs], 2 * self._num_units, True))
                f, r = tf.split(concat, 2, axis=1)
            with tf.variable_scope("candidates"):
                c = self._activation(f * state + (1 - f) * x)
                # variational dropout as suggested in the paper (disabled)
                # if self._is_training and Params.dropout is not None:
                #     c = tf.nn.dropout(c, keep_prob = 1 - Params.dropout)
            # highway connection
            # Our implementation is slightly different to the paper
            # https://arxiv.org/abs/1709.02755 in a way that highway network
            # uses x_hat instead of the cell inputs. Check equation (7) from the original
            # paper for SRU.
            h = r * c + (1 - r) * x
        return h, c
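
A hypothetical usage sketch for a cell like this (the SRUCell constructor signature is assumed here, not taken from the source):

import tensorflow as tf

cell = SRUCell(num_units=128)                         # hypothetical constructor
inputs = tf.placeholder(tf.float32, [None, 20, 50])   # batch, time, features
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)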
Project: R-net    Author: minsangkim142    | project source | file source
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    #   recurrent dropout as proposed in https://arxiv.org/pdf/1603.05118.pdf (currently disabled)
      #if self._is_training and Params.dropout is not None:
        #c = tf.nn.dropout(c, 1 - Params.dropout)
    new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: diversity_based_attention    Author: PrekshaNema25    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u, g = array_ops.split(1, 3, _linear([inputs, state],
                                             3 * self._num_units, True, 1.0))
        r, u, g = sigmoid(r), sigmoid(u), sigmoid(g)
      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))
      new_h = u * state + (1 - u) * c

      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(new_h, state), 1),
                          math_ops.reduce_sum(math_ops.mul(state, state), 1) + eps)

      m = array_ops.transpose(g)

      t1 = math_ops.mul(m, temp)
      t1 = array_ops.transpose(t1)

      distract_h = new_h - state * t1
    return distract_h, distract_h
Project: diversity_based_attention    Author: PrekshaNema25    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"

      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, _linear([inputs, state],
                                  2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)

      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))

      new_h = u * state + (1 - u) * c

      distract_h = new_h - state

    return distract_h, distract_h
Project: diversity_based_attention    Author: PrekshaNema25    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
Project: Dynamic-Memory-Networks-in-TensorFlow    Author: barronalex    | project source | file source
def __call__(self, inputs, state, scope=None):
        """Attention GRU with nunits cells."""
        with vs.variable_scope(scope or "attention_gru_cell"):
            with vs.variable_scope("gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                if inputs.get_shape()[-1] != self._num_units + 1:
                    raise ValueError("Input should be passed as word input "
                                     "concatenated with 1D attention on end axis")
                # extract input vector and attention
                inputs, g = array_ops.split(
                    inputs,
                    num_or_size_splits=[self._num_units, 1],
                    axis=1)
                r = _linear([inputs, state], self._num_units, True)
                r = sigmoid(r)
            with vs.variable_scope("candidate"):
                r = r*_linear(state, self._num_units, False)
            with vs.variable_scope("input"):
                x = _linear(inputs, self._num_units, True)
            h_hat = self._activation(r + x)

            new_h = (1 - g) * state + g * h_hat
        return new_h, new_h
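
The ValueError above documents the expected input layout: each step's input is the word vector with a scalar attention weight appended. A sketch of building such an input (the names and shapes are illustrative):

import tensorflow as tf

num_units = 80
word_inputs = tf.placeholder(tf.float32, [None, num_units])  # per-step word vectors
attention = tf.placeholder(tf.float32, [None, 1])            # per-step 1-D attention
cell_inputs = tf.concat([word_inputs, attention], axis=1)    # shape [batch, num_units + 1]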
Project: ROLO    Author: Guanghan    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
Project: Conv3D_CLSTM    Author: GuangmingZhu    | project source | file source
def __call__(self, inputs, state, k_size=3, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, k_size, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testExtremeThresholds(self):
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
                                                                labels,
                                                                thresholds)
      rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
                                                           thresholds)

      [prec_low, prec_high] = array_ops.split(
          value=prec, num_or_size_splits=2, axis=0)
      [rec_low, rec_high] = array_ops.split(
          value=rec, num_or_size_splits=2, axis=0)

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0.75, prec_low.eval())
      self.assertAlmostEqual(0.0, prec_high.eval())
      self.assertAlmostEqual(1.0, rec_low.eval())
      self.assertAlmostEqual(0.0, rec_high.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testSplit(self):
    for dtype in self.numeric_types:
      self._testBinary(
          lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
          np.int32(0),
          np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                   dtype=dtype),
          expected=[
              np.array([[[1], [2]]], dtype=dtype),
              np.array([[[3], [4]]], dtype=dtype),
              np.array([[[5], [6]]], dtype=dtype),
          ],
          equality_test=self.ListsAreClose)

      self._testBinary(
          lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
          np.int32(1),
          np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                   dtype=dtype),
          expected=[
              np.array([[[1]], [[3]], [[5]]], dtype=dtype),
              np.array([[[2]], [[4]], [[6]]], dtype=dtype),
          ],
          equality_test=self.ListsAreClose)
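
For intuition, the same two splits in NumPy (a quick sketch mirroring the test data):

import numpy as np

x = np.array([[[1], [2]], [[3], [4]], [[5], [6]]])  # shape (3, 2, 1)
np.split(x, 3, axis=0)  # three (1, 2, 1) arrays, as in the first case
np.split(x, 2, axis=1)  # two (3, 1, 1) arrays, as in the second case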
Project: unreasonable-counts    Author: tokestermw    | project source | file source
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):  # "SCRNNCell"

      if self._state_is_tuple:
        s, h = state
      else:
        s, h = array_ops.split(1, 2, state)

      new_s = tf.nn.rnn_cell._linear([(1 - self._alpha) * inputs, self._alpha * s],
                                     self._num_units, True, scope="SlowLinear")
      new_h = tanh(tf.nn.rnn_cell._linear([inputs, new_s, h], self._num_units, True,
                                          scope="FastLinear"))

      if self._state_is_tuple:
        new_state = tf.nn.rnn_cell.LSTMStateTuple(new_s, new_h)
      else:
        new_state = array_ops.concat(1, [new_s, new_h])

      return new_h, new_state
Project: dnnQuery    Author: richardxiong    | project source | file source
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          _linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: dnnQuery    Author: richardxiong    | project source | file source
def call(self, inputs, state):
    """Long short-term memory cell (LSTM)."""
    sigmoid = math_ops.sigmoid
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)

    concat = _linear([inputs, h], 4 * self._num_units, True)

    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

    new_c = (
        c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)

    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat([new_c, new_h], 1)
    return new_h, new_state
Project: TFCommon    Author: MU94W    | project source | file source
def __call__(self, x, h_prev, scope=None):
        with tf.variable_scope(scope or type(self).__name__):

            h_prev = h_prev[0]

            # Check if the input size exist.
            input_size = x.shape.with_rank(2)[1].value
            if input_size is None:
                raise ValueError("Expecting input_size to be set.")

            ### get weights.
            W_shape = (input_size, self.output_size)
            U_shape = (self.output_size, self.output_size)
            b_shape = (self.output_size,)
            Wrz = tf.get_variable(name="Wrz", shape=(input_size, 2 * self.output_size))
            Wh = tf.get_variable(name='Wh', shape=W_shape)
            Urz = tf.get_variable(name="Urz", shape=(self.output_size, 2 * self.output_size),
                                  initializer=TFCommon.Initializer.random_orthogonal_initializer())
            Uh = tf.get_variable(name='Uh', shape=U_shape,
                                 initializer=TFCommon.Initializer.random_orthogonal_initializer())
            brz = tf.get_variable(name="brz", shape=(2 * self.output_size,),
                                  initializer=tf.constant_initializer(0.0))
            bh = tf.get_variable(name='bh', shape=b_shape,
                                 initializer=tf.constant_initializer(0.0))

            ### calculate r and z
            rz = self.__gate_activation(tf.matmul(x, Wrz) + tf.matmul(h_prev, Urz) + brz)
            r, z = array_ops.split(rz, num_or_size_splits=2, axis=1)

            ### calculate candidate
            h_slash = tf.tanh(tf.matmul(x, Wh) + tf.matmul(r * h_prev, Uh) + bh)

            ### final cal
            new_h = (1-z) * h_prev + z * h_slash

            return new_h, tuple([new_h])
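
Written out, this cell computes r, z = sigmoid(x·Wrz + h·Urz + brz), h~ = tanh(x·Wh + (r*h)·Uh + bh), and h' = (1 - z)*h + z*h~. Note that z here gates the candidate, i.e. it plays the role that (1 - u) plays in the other GRU examples on this page (new_h = u * state + (1 - u) * c); the two forms are equivalent up to relabeling the gate.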
Project: tflearn    Author: tflearn    | project source | file source
def __call__(self, inputs, state, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                _w = _linear([inputs, state],
                    2 * self._num_units, True, 1.0, self.weights_init,
                    self.trainable, self.restore, self.reuse)
                r, u = array_ops.split(value=_w, num_or_size_splits=2, axis=1)
                r, u = self._inner_activation(r), self._inner_activation(u)
            with tf.variable_scope("Candidate"):
                c = self._activation(
                    _linear([inputs, r * state], self._num_units, True, 0.,
                            self.weights_init, self.trainable, self.restore,
                            self.reuse))
            new_h = u * state + (1 - u) * c

            self.W, self.b = list(), list()
            # Retrieve RNN Variables
            with tf.variable_scope('Gates/Linear', reuse=True):
                self.W.append(tf.get_variable('Matrix'))
                self.b.append(tf.get_variable('Bias'))
            with tf.variable_scope('Candidate/Linear', reuse=True):
                self.W.append(tf.get_variable('Matrix'))
                self.b.append(tf.get_variable('Bias'))

        return new_h, new_h
Project: deep-learning    Author: lbkchen    | project source | file source
def _bag_features(self, tree_num, input_data):
    split_data = array_ops.split(1, self.params.num_features, input_data)
    return array_ops.concat(
        1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
Project: LIE    Author: EmbraceLife    | project source | file source
def repeat_elements(x, rep, axis):
      """Repeats the elements of a tensor along an axis, like `np.repeat`.

      If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
      will have shape `(s1, s2 * rep, s3)`.

      Arguments:
          x: Tensor or variable.
          rep: Python integer, number of times to repeat.
          axis: Axis along which to repeat.

      Raises:
          ValueError: In case `x.shape[axis]` is undefined.

      Returns:
          A tensor.
      """
      x_shape = x.get_shape().as_list()
      if x_shape[axis] is None:
        raise ValueError('Axis ' + str(axis) + ' of input tensor '
                         'should have a defined dimension, but is None. '
                         'Full tensor shape: ' + str(tuple(x_shape)) + '. '
                         'Typically you need to pass a fully-defined '
                         '`input_shape` argument to your first layer.')
      # slices along the repeat axis
      splits = array_ops.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)
      # repeat each slice the given number of reps
      x_rep = [s for s in splits for _ in range(rep)]
      return concatenate(x_rep, axis)
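
A toy shape check of the contract described in the docstring (plain tf.split/tf.concat stand in for the module's helpers):

import tensorflow as tf

x = tf.zeros([2, 3, 5])
splits = tf.split(x, num_or_size_splits=3, axis=1)           # three [2, 1, 5] slices
x_rep = tf.concat([s for s in splits for _ in range(4)], 1)  # rep=4 gives shape [2, 12, 5]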
Project: LIE    Author: EmbraceLife    | project source | file source
def fit_on_texts(self, texts):
            """Updates internal vocabulary based on a list of texts.

            Required before using `texts_to_sequences` or `texts_to_matrix`.

            Arguments:
                texts: can be a list of strings,
                    or a generator of strings (for memory-efficiency)
            """
            self.document_count = 0
            for text in texts:
              self.document_count += 1
              seq = text if self.char_level else text_to_word_sequence(
                  text, self.filters, self.lower, self.split)
              for w in seq:
                if w in self.word_counts:
                  self.word_counts[w] += 1
                else:
                  self.word_counts[w] = 1
              for w in set(seq):
                if w in self.word_docs:
                  self.word_docs[w] += 1
                else:
                  self.word_docs[w] = 1

            wcounts = list(self.word_counts.items())
            wcounts.sort(key=lambda x: x[1], reverse=True)
            sorted_voc = [wc[0] for wc in wcounts]
            # note that index 0 is reserved, never assigned to an existing word
            self.word_index = dict(
                list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))

            self.index_docs = {}
            for w, c in list(self.word_docs.items()):
              self.index_docs[self.word_index[w]] = c
Project: LIE    Author: EmbraceLife    | project source | file source
def load_data(path='boston_housing.npz', seed=113, test_split=0.2):
          """Loads the Boston Housing dataset.

          Arguments:
              path: path where to cache the dataset locally
                  (relative to ~/.keras/datasets).
              seed: Random seed for shuffling the data
                  before computing the test split.
              test_split: fraction of the data to reserve as test set.

          Returns:
              Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
          """
          assert 0 <= test_split < 1
          fh = 'f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5'
          path = get_file(
              path,
              origin='https://s3.amazonaws.com/keras-datasets/boston_housing.npz',
              file_hash=fh)
          f = np.load(path)
          x = f['x']
          y = f['y']
          f.close()

          np.random.seed(seed)
          np.random.shuffle(x)
          np.random.seed(seed)
          np.random.shuffle(y)

          x_train = np.array(x[:int(len(x) * (1 - test_split))])
          y_train = np.array(y[:int(len(x) * (1 - test_split))])
          x_test = np.array(x[int(len(x) * (1 - test_split)):])
          y_test = np.array(y[int(len(x) * (1 - test_split)):])
          return (x_train, y_train), (x_test, y_test)
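
The double call to np.random.seed above applies the same permutation to x and y, keeping features and targets aligned. An equivalent, more explicit form of the shuffle (a sketch with stand-in arrays, not from the source):

import numpy as np

x = np.arange(10).reshape(5, 2)  # stand-ins for the dataset arrays
y = np.arange(5)
rng = np.random.RandomState(113)
perm = rng.permutation(len(x))
x, y = x[perm], y[perm]          # rows of x and y stay aligned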
Project: joint-many-task-model    Author: rubythonode    | project source | file source
def __call__(self, inputs, state, scope=None):
        """LSTM as mentioned in paper."""
        with vs.variable_scope(scope or "basic_lstm_cell"):
            # Parameters of gates are concatenated into one multiply for
            # efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(
                    value=state, num_or_size_splits=2, split_dim=1)
            g = tf.concat(1, [inputs, h])
            concat = linear([g], 4 * self._num_units, True, scope=scope)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(
                value=concat, num_split=4, split_dim=1)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat_v2([new_c, new_h], 1)
            return new_h, new_state
Project: chinese-char-rnn    Author: indiejoseph    | project source | file source
def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "ran_cell", reuse=self._reuse):
      with vs.variable_scope("gates"):
        value = tf.nn.sigmoid(linear([state, inputs], 2 * self._num_units, True,
                                     normalize=self._normalize))
        i, f = array_ops.split(value=value, num_or_size_splits=2, axis=1)

      with vs.variable_scope("candidate"):
        c = linear([inputs], self._num_units, True, normalize=self._normalize)

      new_c = i * c + f * state
      new_h = self._activation(new_c)  # RAN output: h_t = g(c_t), the updated cell state

    return new_h, new_c
Project: lsdc    Author: febert    | project source | file source
def _bag_features(self, tree_num, input_data):
    split_data = array_ops.split(1, self.params.num_features, input_data)
    return array_ops.concat(
        1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
Project: lsdc    Author: febert    | project source | file source
def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    self._state_size = 2 * num_units
    self._output_size = num_units
Project: lsdc    Author: febert    | project source | file source
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""

    with vs.variable_scope(scope or type(self).__name__):  # "LayerNormBasicLSTMCell"
      c, h = state
      args = array_ops.concat(1, [inputs, h])
      concat = self._linear(args)

      i, j, f, o = array_ops.split(1, 4, concat)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

      g = self._activation(j)
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)

      new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
Project: tf-ran-cell    Author: indiejoseph    | project source | file source
def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "ran_cell", reuse=self._reuse):
      with vs.variable_scope("gates"):
        c, h = state
        gates = tf.nn.sigmoid(linear([inputs, h], 2 * self._num_units, True,
                                     normalize=self._normalize))
        i, f = array_ops.split(value=gates, num_or_size_splits=2, axis=1)

      with vs.variable_scope("candidate"):
        content = linear([inputs], self._num_units, True, normalize=self._normalize)

      new_c = i * content + f * c
      new_h = self._activation(new_c)  # activate the updated cell state, per the RAN formulation
      new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
      output = new_h
    return output, new_state
Project: tf-ran-cell    Author: indiejoseph    | project source | file source
def __call__(self, inputs, state, scope=None):
    with _checked_scope(self, scope or "ran_cell", reuse=self._reuse):
      with vs.variable_scope("gates"):
        value = tf.nn.sigmoid(linear([state, inputs], 2 * self._num_units, True,
                                     normalize=self._normalize))
        i, f = array_ops.split(value=value, num_or_size_splits=2, axis=1)

      with vs.variable_scope("candidate"):
        c = linear([inputs], self._num_units, True, normalize=self._normalize)

      new_c = i * c + f * state
      new_h = self._activation(new_c)  # RAN output: h_t = g(c_t)

    return new_h, new_c
Project: shuttleNet    Author: shiyemin    | project source | file source
def __call__(self, inputs, state, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                r, u = array_ops.split(_linear([inputs, state],
                                        2 * self._num_units, True, 1.0), 2, 1)
                r, u = sigmoid(r), sigmoid(u)
            with tf.variable_scope("Candidate"):
                c = self._activation(_linear([inputs, r * state],
                                            self._num_units, True))
            new_h = u * state + (1 - u) * c
        return new_h, new_h
Project: GORU-tensorflow    Author: jingli9111    | project source | file source
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "goru_cell"):

            U_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            b_init = init_ops.constant_initializer(2.)
            mod_b_init = init_ops.constant_initializer(0.01)

            U = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size * 3],
                                dtype=tf.float32, initializer=U_init)
            Ux = math_ops.matmul(inputs, U)
            U_cx, U_rx, U_gx = array_ops.split(Ux, 3, axis=1)

            W_r = vs.get_variable("W_r", [self._hidden_size, self._hidden_size],
                                  dtype=tf.float32, initializer=U_init)
            W_g = vs.get_variable("W_g", [self._hidden_size, self._hidden_size],
                                  dtype=tf.float32, initializer=U_init)
            W_rh = math_ops.matmul(state, W_r)
            W_gh = math_ops.matmul(state, W_g)

            bias_r = vs.get_variable("bias_r", [self._hidden_size],
                                     dtype=tf.float32, initializer=b_init)
            bias_g = vs.get_variable("bias_g", [self._hidden_size], dtype=tf.float32)
            bias_c = vs.get_variable("bias_c", [self._hidden_size],
                                     dtype=tf.float32, initializer=mod_b_init)

            r_tmp = U_rx + W_rh + bias_r
            g_tmp = U_gx + W_gh + bias_g
            r = math_ops.sigmoid(r_tmp)

            g = math_ops.sigmoid(g_tmp)

            Unitaryh = _eunn_loop(state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)
            c = modrelu(math_ops.multiply(r, Unitaryh) + U_cx, bias_c, False)
            new_state = math_ops.multiply(g, state) + math_ops.multiply(1 - g, c)

        return new_state, new_state
Project: DL-Benchmarks    Author: DL-Benchmarks    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, linear([inputs, state],
                                            2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = tanh(linear([inputs, r * state], self._num_units, True))
      new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: DL-Benchmarks    Author: DL-Benchmarks    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

    return new_h, array_ops.concat(1, [new_c, new_h])
Project: DL-Benchmarks    Author: DL-Benchmarks    | project source | file source
def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: int, The dimensionality of the inputs into the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
    """
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards

    if num_proj:
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      self._state_size = 2 * num_units
      self._output_size = num_units
Project: Tensorflow-SegNet    Author: tkuanlun350    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(3, 2, _conv([inputs, state], 2 * self._num_units,
                                           self._k_size, True,
                                           initializer=self._initializer))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = self._activation(_conv([inputs, r * state], self._num_units,
                                   self._k_size, True,
                                   initializer=self._initializer))
      new_h = u * state + (1 - u) * c
    return new_h, new_h
Project: Tensorflow-SegNet    Author: tkuanlun350    | project source | file source
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)
      s1 = vs.get_variable("s1", initializer=tf.ones(
          [self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      s2 = vs.get_variable("s2", initializer=tf.ones(
          [self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      # s3 = vs.get_variable("s3", initializer=tf.ones([self._batch_size, self._num_units]), dtype=tf.float32)

      b1 = vs.get_variable("b1", initializer=tf.zeros(
          [self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      b2 = vs.get_variable("b2", initializer=tf.zeros(
          [self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
      # b3 = vs.get_variable("b3", initializer=tf.zeros([self._batch_size, self._num_units]), dtype=tf.float32)
      input_below_ = _conv([inputs], 4 * self._num_units, self._k_size, False,
                           initializer=self._initializer, scope="out_1")
      input_below_ = ln(input_below_, s1, b1)
      state_below_ = _conv([h], 4 * self._num_units, self._k_size, False,
                           initializer=self._initializer, scope="out_2")
      state_below_ = ln(state_below_, s2, b2)
      lstm_matrix = tf.add(input_below_, state_below_)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, lstm_matrix)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
Project: Video-Captioning    Author: hehefan    | project source | file source
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):
      # define within cell constants/ counters used to control while loop for ACTStep
      if self.state_is_tuple:
        state = array_ops.concat(1, state)

      self.batch_size = tf.shape(inputs)[0]
      self.one_minus_eps = tf.fill([self.batch_size], tf.constant(1.0 - self.epsilon, dtype=tf.float32))
      prob = tf.fill([self.batch_size], tf.constant(0.0, dtype=tf.float32), "prob")
      counter = tf.zeros_like(prob, tf.float32, name="counter")
      acc_outputs = tf.fill([self.batch_size, self.output_size], 0.0, name='output_accumulator')
      acc_states = tf.zeros_like(state, tf.float32, name="state_accumulator")
      flag = tf.fill([self.batch_size], True, name="flag")

      pred = lambda flag, prob, counter, state, inputs, acc_outputs, acc_states: \
          tf.reduce_any(flag)

      _, probs, iterations, _, _, output, next_state = control_flow_ops.while_loop(
          pred, self.act_step,
          loop_vars=[flag, prob, counter, state, inputs, acc_outputs, acc_states])

    self.ACT_remainder.append(1 - probs)
    self.ACT_iterations.append(iterations)

    if self.state_is_tuple:
      next_c, next_h = array_ops.split(1, 2, next_state)
      next_state = rnn_cell._LSTMStateTuple(next_c, next_h)

    return output, next_state
Project: rnn_sent    Author: bill-kalog    | project source | file source
def __call__(self, inputs, state, scope=None):
        with _checked_scope(
                self, scope or "attention_based_gru_cell", reuse=self._reuse):
            with vs.variable_scope("gates"):
                # We start with bias of 1.0 to not reset and not update.
                inputs, g_t = array_ops.split(
                    inputs, num_or_size_splits=[self._num_units, 1], axis=1)
                reset_gate = sigmoid(_linear(
                    [inputs, state], self._num_units, True, 1.0))
            with vs.variable_scope("candidate"):
                h_tilde = self._activation(_linear(
                    [inputs, reset_gate * state], self._num_units, True))
                new_h = g_t * h_tilde + (1 - g_t) * state
        return new_h, new_h