Python tensorflow.python.ops.array_ops module: sequence_mask() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.sequence_mask().
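
A minimal usage sketch (not taken from any of the projects below, values are illustrative, assuming TensorFlow 1.x graph mode): sequence_mask(lengths, maxlen, dtype) builds a [batch, maxlen] mask that is True (or 1.0) inside each sequence and False (or 0.0) over the padding.

import tensorflow as tf
from tensorflow.python.ops import array_ops

lengths = tf.constant([1, 3, 2])
bool_mask = array_ops.sequence_mask(lengths, maxlen=4)          # dtype defaults to tf.bool
float_mask = array_ops.sequence_mask(lengths, 4, tf.float32)    # 1.0 inside the sequence, 0.0 over padding

with tf.Session() as sess:
    print(sess.run(float_mask))
    # [[1. 0. 0. 0.]
    #  [1. 1. 1. 0.]
    #  [1. 1. 0. 0.]]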

Project: TFCommon    Author: MU94W
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
        self.attention_units = attention_units
        self.enc_units = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1, 0, 2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]
        self.mode = mode
        self.mask = array_ops.sequence_mask(sequence_length, self.enc_length, tf.float32) if sequence_length is not None else None

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))

        # pre-compute Uahj to minimize the computational cost
        with tf.variable_scope('attention'):
            Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
        self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1, 1, 1, 1], "SAME")
Project: TFCommon    Author: MU94W
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
        self.attention_units    = attention_units
        self.enc_units          = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1,0,2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]
        self.mode = mode
        self.mask = array_ops.sequence_mask(sequence_length, self.enc_length) if sequence_length is not None else None
        self.tiny = -math.inf * tf.ones(shape=(self.batch_size, self.enc_length))

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))
        ### pre-compute Uahj to minimize the computational cost
        with tf.variable_scope('attention'):
            Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
        self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1,1,1,1], "SAME")
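
The bool mask and the `tiny` tensor built above are the usual ingredients of a masked softmax. A hypothetical, self-contained sketch of that pattern (the `energies` tensor is illustrative and not part of the excerpt):

import math
import tensorflow as tf
from tensorflow.python.ops import array_ops

sequence_length = tf.constant([2, 4])
energies = tf.random_normal((2, 4))                      # illustrative attention scores
mask = array_ops.sequence_mask(sequence_length, 4)       # bool, True inside each sequence
tiny = -math.inf * tf.ones_like(energies)
alignments = tf.nn.softmax(tf.where(mask, energies, tiny))  # padded steps receive ~0 weight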
Project: TFCommon    Author: MU94W
def __init__(self, attention_units, memory, sequence_length=None, time_major=True):
        self.attention_units = attention_units
        self.enc_units = memory.get_shape()[-1].value

        if time_major:
            memory = tf.transpose(memory, perm=(1, 0, 2))

        self.enc_length = tf.shape(memory)[1]
        self.batch_size = tf.shape(memory)[0]
        self.mask = array_ops.sequence_mask(sequence_length, self.enc_length, tf.float32) if sequence_length is not None else None

        self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, self.enc_units))
Project: Question-Answering    Author: MurtyShikhar
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
  if memory_sequence_length is None:
    return score
  message = ("All values in memory_sequence_length must greater than zero.")
  with ops.control_dependencies(
      [check_ops.assert_positive(memory_sequence_length, message=message)]):
    score_mask = array_ops.sequence_mask(
        memory_sequence_length, maxlen=array_ops.shape(score)[1])
    score_mask_values = score_mask_value * array_ops.ones_like(score)
    return array_ops.where(score_mask, score, score_mask_values)
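
A hypothetical call, assuming the function above is in scope and that tensorflow is imported as tf (shapes and lengths are made up): positions beyond each memory_sequence_length are overwritten with score_mask_value, typically a large negative value such as -inf so that a following softmax ignores them.

import tensorflow as tf

score = tf.random_normal((2, 4))                 # scores for 2 memories padded to length 4
masked = _maybe_mask_score(score,
                           memory_sequence_length=tf.constant([2, 3]),
                           score_mask_value=float("-inf"))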
Project: Tacotron    Author: MU94W
def __call__(self, inputs, sequence_length=None, is_training=True, time_major=None):
        assert time_major is not None, "[*] You must specify whether the input is time_major or not!"
        if time_major:
            inputs = tf.transpose(inputs, perm=(1,0,2))     # Use batch major data.
        assert inputs.get_shape()[-1] == self.proj_unit[1], "[!] input's shape is not the same as ConvProj's output!"

        ### for correctness.
        if sequence_length is not None:
            mask = tf.expand_dims(array_ops.sequence_mask(sequence_length, tf.shape(inputs)[1], tf.float32), -1)
            inputs = inputs * mask

        ConvBankWithPool    = Conv1dBankWithMaxPool(self.bank_K)
        ConvProj            = Conv1dProjection(self.proj_unit)
        Highway             = FCHighwayNet(self.highway_layers)
        rnn_cell_fw         = GRUCell(self.proj_unit[1])
        rnn_cell_bw         = GRUCell(self.proj_unit[1])

        ### calculate
        # conv net
        output_0 = ConvBankWithPool(inputs, is_training)

        ### for correctness.
        if sequence_length is not None:
            output_0 = output_0 * mask

        output_1 = ConvProj(output_0, is_training)
        # residual connect
        res_output = tf.identity(inputs) + output_1

        # highway net
        highway_output = Highway(res_output)

        # biGRU
        # batch major
        final_output, *_ = bidirectional_dynamic_rnn(rnn_cell_fw, rnn_cell_bw, highway_output, sequence_length=sequence_length, time_major=False, dtype=tf.float32)
        final_output = tf.concat(final_output, axis=-1)
        if time_major:
            final_output = tf.transpose(final_output, perm=(1,0,2))

        return final_output
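
A self-contained sketch of the masking pattern used above (shapes are illustrative): a [batch, time, channels] tensor multiplied by a [batch, time, 1] float mask zeroes out the padded frames via broadcasting.

import tensorflow as tf
from tensorflow.python.ops import array_ops

inputs = tf.random_normal((2, 5, 8))                                                     # [batch, time, channels]
mask = tf.expand_dims(array_ops.sequence_mask(tf.constant([3, 5]), 5, tf.float32), -1)   # [batch, time, 1]
masked_inputs = inputs * mask                                                            # padded frames become 0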
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def mask_activations_and_labels(activations, labels, sequence_lengths):
  """Remove entries outside `sequence_lengths` and returned flattened results.

  Args:
    activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
    labels: Label values, shape `[batch_size, padded_length]`.
    sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
      length of each sequence. If `None`, then each sequence is unpadded.

  Returns:
    activations_masked: `logit` values with those beyond `sequence_lengths`
      removed for each batch. Batches are then concatenated. Shape
      `[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
      shape `[batch_size * padded_length, k]` otherwise.
    labels_masked: Label values after removing unneeded entries. Shape
      `[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and shape
      `[batch_size * padded_length]` otherwise.
  """
  with ops.name_scope('mask_activations_and_labels',
                      values=[activations, labels, sequence_lengths]):
    labels_shape = array_ops.shape(labels)
    batch_size = labels_shape[0]
    padded_length = labels_shape[1]
    if sequence_lengths is None:
      flattened_dimension = padded_length * batch_size
      activations_masked = array_ops.reshape(activations,
                                             [flattened_dimension, -1])
      labels_masked = array_ops.reshape(labels, [flattened_dimension])
    else:
      mask = array_ops.sequence_mask(sequence_lengths, padded_length)
      activations_masked = array_ops.boolean_mask(activations, mask)
      labels_masked = array_ops.boolean_mask(labels, mask)
    return activations_masked, labels_masked
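
A hypothetical call with made-up shapes, assuming the function above is in scope: a batch of 2 sequences padded to length 3, with k = 4 logits per step.

import tensorflow as tf

activations = tf.zeros((2, 3, 4))               # [batch_size, padded_length, k]
labels = tf.zeros((2, 3), dtype=tf.int32)       # [batch_size, padded_length]
lengths = tf.constant([1, 3])
acts_flat, labels_flat = mask_activations_and_labels(activations, labels, lengths)
# acts_flat has shape [1 + 3, 4] = [4, 4]; labels_flat has shape [4]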
Project: Tacotron    Author: MU94W
def __call__(self, inputs, sequence_length=None, is_training=True, time_major=None):
        assert time_major is not None, "[*] You must specify whether the input is time_major or not!"
        if time_major:
            inputs = tf.transpose(inputs, perm=(1, 0, 2))     # Use batch major data.
        assert inputs.get_shape()[-1] == self.proj_unit[1], "[!] input's shape is not the same as ConvProj's output!"

        ### for correctness.
        if sequence_length is not None:
            mask = tf.expand_dims(array_ops.sequence_mask(sequence_length, tf.shape(inputs)[1], tf.float32), -1)
            inputs = inputs * mask

        ConvBankWithPool    = Conv1dBankWithMaxPool(self.bank_K)
        ConvProj            = Conv1dProjection(self.proj_unit)
        Highway             = FCHighwayNet(self.highway_layers)
        cell                = GRUCell(self.proj_unit[1])
        fw_cell             = FusedRNNCellAdaptor(cell)
        bw_cell             = TimeReversedFusedRNN(fw_cell)

        ### calculate
        # conv net
        output_0 = ConvBankWithPool(inputs, is_training)

        ### for correctness.
        if sequence_length is not None:
            output_0 = output_0 * mask

        output_1 = ConvProj(output_0, is_training)
        # residual connect
        res_output = tf.identity(inputs) + output_1

        # highway net
        highway_output = Highway(res_output)

        # biGRU
        # time major
        bGRUinp = tf.transpose(highway_output, perm=(1, 0, 2))
        fw_out, _ = fw_cell(bGRUinp, sequence_length=sequence_length, scope="fw", dtype=tf.float32)
        bw_out, _ = bw_cell(bGRUinp, sequence_length=sequence_length, scope="bw", dtype=tf.float32)
        final_output = tf.concat([fw_out, bw_out], axis=-1)

        if not time_major:
            final_output = tf.transpose(final_output, perm=(1,0,2))

        return final_output