Python tensorflow module: not_equal() example source code

The following 50 code examples, collected from open-source Python projects, illustrate how to use tensorflow.not_equal().

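Before the project examples, a minimal sketch of the op itself (not taken from any project below): tf.not_equal compares two tensors element-wise with NumPy-style broadcasting and returns a bool tensor. The TF1 session style matches the examples on this page.

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
y = tf.constant([1, 5, 3])   # broadcast against each row of x

mask = tf.not_equal(x, y)    # bool tensor of shape (2, 3)

with tf.Session() as sess:
    print(sess.run(mask))
    # [[False  True False]
    #  [ True False  True]]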
Project: comprehend    Author: Fenugreek    | Project source | File source
def get_label_costs(coder, dataset, labels, batch_size=100):
    """
    Return the average cross-entropy loss and classification error rate
    of the coder object, with its current weights, on the given dataset.
    """

    n_batches = dataset.shape[0] // batch_size
    error = 0.
    cost = 0.
    for index in range(n_batches):
        batch = dataset[index * batch_size : (index+1) * batch_size]
        labels_batch = labels[index * batch_size : (index+1) * batch_size]
        predicted = coder.get_hidden_values(batch)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=predicted,
                                                              labels=labels_batch)
        cost += tf.reduce_mean(loss).eval()

        bad_prediction = tf.not_equal(tf.argmax(predicted, 1), labels_batch)
        error += tf.reduce_mean(tf.cast(bad_prediction, tf.float32)).eval()

    return (cost / n_batches, error / n_batches)
Project: antgo    Author: jianzfb    | Project source | File source
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get
    of DontCare elements. Also remove elements based on the number of classes.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep labels below num_classes and not listed in out_labels.
        # (Fixed: the loop must iterate over out_labels, not the labels tensor.)
        mask = tf.less(labels, num_classes)
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes


# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
Project: complex_tf    Author: woodshop    | Project source | File source
def testCplxNotEqualGPU(self):
        shapes1 = [(5,4,3), (5,4), (1,), (5,)]
        shapes2 = [(5,4,3), (1,), (5,4), (5,)]
        for [sh0, sh1] in zip(shapes1, shapes2):
            x = (np.random.randn(np.prod(sh0)) +
                 1j*np.random.randn(np.prod(sh0))).astype(np.complex64)
            y = (np.random.randn(np.prod(sh1)) +
                 1j*np.random.randn(np.prod(sh1))).astype(np.complex64)
            if len(sh0) == 1:
                ix = np.random.permutation(
                    np.arange(np.prod(sh1)))[:np.prod(sh1)//2]
                y[ix] = x[0]
            elif len(sh1) == 1:
                ix = np.random.permutation(
                    np.arange(np.prod(sh0)))[:np.prod(sh0)//2]
                x[ix] = y[0]
            else:
                ix = np.random.permutation(
                    np.arange(np.prod(sh0)))[:np.prod(sh0)//2]
                x[ix] = y[ix]
            x = np.reshape(x, sh0)
            y = np.reshape(y, sh1)
            self._compareGpu(x, y, np.not_equal, tf.not_equal)
Project: SSD_tensorflow_VOC    Author: LevinJ    | Project source | File source
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get
    of DontCare elements. Also remove elements based on the number of classes.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep labels below num_classes and not listed in out_labels.
        # (Fixed: the loop must iterate over out_labels, not the labels tensor.)
        mask = tf.less(labels, num_classes)
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes


# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
Project: mean-teacher    Author: CuriousAI    | Project source | File source
def errors(logits, labels, name=None):
    """Compute error mean and whether each unlabeled example is erroneous

    Assume unlabeled examples have label == -1.
    Compute the mean error over labeled examples.
    Mean error is NaN if there are no labeled examples.
    Note that unlabeled examples are treated differently in cost calculation.
    """
    with tf.name_scope(name, "errors") as scope:
        applicable = tf.not_equal(labels, -1)
        labels = tf.boolean_mask(labels, applicable)
        logits = tf.boolean_mask(logits, applicable)
        predictions = tf.argmax(logits, -1)
        labels = tf.cast(labels, tf.int64)
        per_sample = tf.to_float(tf.not_equal(predictions, labels))
        mean = tf.reduce_mean(per_sample, name=scope)
        return mean, per_sample
Project: mean-teacher    Author: CuriousAI    | Project source | File source
def classification_costs(logits, labels, name=None):
    """Compute classification cost mean and classification cost per sample

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
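A toy sketch of the -1 masking idiom used by the two mean-teacher functions above (the values below are invented for illustration): the tf.not_equal(labels, -1) mask lets unlabeled examples flow through the graph with exactly zero cost.

import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 1.0], [3.0, 0.0]])
labels = tf.constant([0, -1, 1])          # the second example is unlabeled

applicable = tf.not_equal(labels, -1)     # [True, False, True]
safe_labels = tf.where(applicable, labels, tf.zeros_like(labels))
per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=safe_labels)
per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

with tf.Session() as sess:
    print(sess.run(per_sample))           # cost is 0 for the unlabeled example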
Project: TextGAN    Author: ankitkv    | Project source | File source
def unwrap_output_sparse(self, final_state, include_stop_tokens=True):
        """
        Retrieve the beam search output from the final state.

        Returns a sparse tensor with underlying dimensions of [batch_size, max_len]
        """
        output_dense = final_state[0]
        mask = tf.not_equal(output_dense, self.stop_token)

        if include_stop_tokens:
            output_dense = tf.concat(1, [output_dense[:, 1:],
                                         tf.ones_like(output_dense[:, 0:1]) *
                                         self.stop_token])
            mask = tf.concat(1, [mask[:, 1:], tf.cast(tf.ones_like(mask[:, 0:1],
                                                                   dtype=tf.int8),
                                                      tf.bool)])

        return sparse_boolean_mask(output_dense, mask)
Project: TextGAN    Author: ankitkv    | Project source | File source
def mle_loss(self, outputs, targets):
        '''Maximum likelihood estimation loss.'''
        present_mask = tf.greater(targets, 0, name='present_mask')
        # don't enforce loss on true <unk>'s
        unk_mask = tf.not_equal(targets, self.vocab.unk_index, name='unk_mask')
        mask = tf.cast(tf.logical_and(present_mask, unk_mask), tf.float32)
        output = tf.reshape(tf.concat(1, outputs), [-1, cfg.hidden_size])
        if self.training and cfg.softmax_samples < len(self.vocab.vocab):
            targets = tf.reshape(targets, [-1, 1])
            mask = tf.reshape(mask, [-1])
            loss = tf.nn.sampled_softmax_loss(self.softmax_w, self.softmax_b, output, targets,
                                              cfg.softmax_samples, len(self.vocab.vocab))
            loss *= mask
        else:
            logits = tf.nn.bias_add(tf.matmul(output, tf.transpose(self.softmax_w),
                                              name='softmax_transform_mle'), self.softmax_b)
            loss = tf.nn.seq2seq.sequence_loss_by_example([logits],
                                                          [tf.reshape(targets, [-1])],
                                                          [tf.reshape(mask, [-1])])
        return tf.reshape(loss, [cfg.batch_size, -1])
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def confidence_cnn13(image_with_alpha, input_size=512):
    image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
    alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])

    #print ('image', image)
    #print ('alpha', alpha)

    visible = tf.not_equal(alpha, tf.zeros_like(alpha))

    confidence = confidence_cnn3(image, input_size)

    final_confidence = tf.where(visible, confidence, tf.zeros_like(confidence))

    #print ('final conf', final_confidence)

    return final_confidence
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def confidence_cnn14(image_with_alpha, input_size=512):
    image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
    alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])

    #print ('image', image)
    #print ('alpha', alpha)

    visible = tf.not_equal(alpha, tf.zeros_like(alpha))

    confidence = confidence_cnn4(image, input_size)

    final_confidence = tf.where(visible, confidence, tf.zeros_like(confidence))

    #print ('final conf', final_confidence)

    return final_confidence
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def confidence_cnn23(image_with_alpha, input_size=512):
    image = tf.slice(image_with_alpha,[0,0,0,0],[-1,-1,-1,3])
    alpha = tf.slice(image_with_alpha,[0,0,0,3],[-1,-1,-1,1])

    #print ('image', image)
    #print ('alpha', alpha)

    visible = tf.not_equal(alpha, tf.zeros_like(alpha))

    confidence = confidence_cnn3(image, input_size)

    negative_confidence = tf.multiply(tf.ones_like(confidence),tf.constant(-1.0))

    final_confidence = tf.where(visible, confidence, negative_confidence)

    #print ('final conf', final_confidence)

    return final_confidence
Project: Deep-Fashion    Author: TomPyonsuke    | Project source | File source
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get
    of DontCare elements. Also remove elements based on the number of classes.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep labels below num_classes and not listed in out_labels.
        # (Fixed: the loop must iterate over out_labels, not the labels tensor.)
        mask = tf.less(labels, num_classes)
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes


# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
Project: master-thesis    Author: AndreasMadsen    | Project source | File source
def _build_metric(self, model: 'code.model.abstract.Model') -> tf.Tensor:
        with tf.name_scope(None, self.metric_name,
                           values=[self.dataset.source, self.dataset.target]):
            x = self.dataset.source
            y = self.dataset.target
            length = self.dataset.length

            # build mask
            mask = tf.cast(tf.not_equal(y, tf.zeros_like(y)), tf.float32)

            # create masked error tensor
            errors = tf.not_equal(
                model.inference_model(x, length, reuse=True), y
            )
            errors = tf.cast(errors, tf.float32) * mask  # mask errors

            # tf.reduce_sum(mask) counts the unmasked elements
            return tf.reduce_sum(errors) / tf.reduce_sum(mask)
Project: dspp-keras    Author: PeptoneInc    | Project source | File source
def chi2(exp, obs):
    """
        Compute the chi-squared statistic over the non-zero expected elements
    """
    zero = tf.constant(0, dtype=tf.float32)
    mask = tf.not_equal(exp, zero)

    def masking(tensor, mask):
        return tf.boolean_mask(tensor, mask)

    stat = tf.reduce_sum(
        tf.div(
            tf.pow(
                tf.subtract(masking(obs, mask), masking(exp, mask)),
                2),
            masking(exp, mask)),
        name="chi2_statistics")

    return stat
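A quick sanity check of the chi2 helper above, with made-up constant tensors (assumes the function is in scope):

import tensorflow as tf

exp = tf.constant([4.0, 0.0, 2.0])   # the zero entry is masked out by tf.not_equal
obs = tf.constant([5.0, 3.0, 2.0])

with tf.Session() as sess:
    print(sess.run(chi2(exp, obs)))  # (5-4)^2/4 + (2-2)^2/2 = 0.25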
Project: DAVIS-2016-Chanllege-Solution    Author: tangyuhao    | Project source | File source
def bboxes_filter_labels(labels, bboxes,
                         out_labels=[], num_classes=np.inf,
                         scope=None):
    """Filter out labels from a collection. Typically used to get
    of DontCare elements. Also remove elements based on the number of classes.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter_labels', [labels, bboxes]):
        # Keep labels below num_classes and not listed in out_labels.
        # (Fixed: the loop must iterate over out_labels, not the labels tensor.)
        mask = tf.less(labels, num_classes)
        for l in out_labels:
            mask = tf.logical_and(mask, tf.not_equal(labels, l))
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes


# =========================================================================== #
# Standard boxes computation.
# =========================================================================== #
Project: treelstm    Author: nicolaspi    | Project source | File source
def add_embedding(self, embeddings):

        #embed=np.load('glove{0}_uniform.npy'.format(self.emb_dim))
        if embeddings is not None:
            initializer = embeddings
        else:
            initializer = tf.random_uniform_initializer(-0.05,0.05)

        with tf.variable_scope("Embed",regularizer=None):
            embedding=tf.Variable(initial_value = initializer, trainable=True, name = 'embedding', dtype='float32')
            ix=tf.to_int32(tf.not_equal(self.input,-1))*self.input
            emb_tree=tf.nn.embedding_lookup(embedding,ix)
            emb_tree=emb_tree*(tf.expand_dims(
                        tf.to_float(tf.not_equal(self.input,-1)),2))

            return emb_tree
Project: youtube-8m    Author: wangheda    | Project source | File source
def build_graph(all_readers,
                input_reader,
                input_data_pattern,
                all_eval_data_patterns,
                batch_size=256):

  original_video_id, original_input, unused_labels_batch, unused_num_frames = (
      get_input_evaluation_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))

  video_id_notequal_tensors = []
  model_input_tensor = None
  input_distance_tensors = []
  for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
    video_id, model_input_raw, labels_batch, unused_num_frames = (
        get_input_evaluation_tensors(
            reader,
            data_pattern,
            batch_size=batch_size))
    video_id_notequal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
    if model_input_tensor is None:
      model_input_tensor = model_input_raw
    input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))

  video_id_mismatch_tensor = tf.stack(video_id_notequal_tensors)
  input_distance_tensor = tf.stack(input_distance_tensors)
  actual_batch_size = tf.shape(original_video_id)[0]

  tf.add_to_collection("video_id_mismatch", video_id_mismatch_tensor)
  tf.add_to_collection("input_distance", input_distance_tensor)
  tf.add_to_collection("actual_batch_size", actual_batch_size)
Project: youtube-8m    Author: wangheda    | Project source | File source
def build_graph(all_readers,
                input_reader,
                input_data_pattern,
                all_eval_data_patterns,
                batch_size=256):

  original_video_id, original_input, unused_labels_batch, unused_num_frames = (
      get_input_evaluation_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))

  video_id_equal_tensors = []
  model_input_tensor = None
  input_distance_tensors = []
  for reader, data_pattern in zip(all_readers, all_eval_data_patterns):
    video_id, model_input_raw, labels_batch, unused_num_frames = (
        get_input_evaluation_tensors(
            reader,
            data_pattern,
            batch_size=batch_size))
    video_id_equal_tensors.append(tf.reduce_sum(tf.cast(tf.not_equal(original_video_id, video_id), dtype=tf.float32)))
    if model_input_tensor is None:
      model_input_tensor = model_input_raw
    input_distance_tensors.append(tf.reduce_mean(tf.reduce_sum(tf.square(model_input_tensor - model_input_raw), axis=1)))

  video_id_equal_tensor = tf.stack(video_id_equal_tensors)
  input_distance_tensor = tf.stack(input_distance_tensors)

  tf.add_to_collection("video_id_equal", video_id_equal_tensor)
  tf.add_to_collection("input_distance", input_distance_tensor)
Project: bp-mll-tensorflow    Author: vanHavel    | Project source | File source
def bp_mll_loss(y_true, y_pred):

    # get true and false labels
    shape = tf.shape(y_true)
    y_i = tf.equal(y_true, tf.ones(shape))
    y_i_bar = tf.not_equal(y_true, tf.ones(shape))

    # get indices to check
    truth_matrix = tf.to_float(pairwise_and(y_i, y_i_bar))

    # calculate all exp'd differences
    sub_matrix = pairwise_sub(y_pred, y_pred)
    exp_matrix = tf.exp(tf.negative(sub_matrix))

    # check which differences to consider and sum them
    sparse_matrix = tf.multiply(exp_matrix, truth_matrix)
    sums = tf.reduce_sum(sparse_matrix, axis=[1,2])

    # get normalizing terms and apply them
    y_i_sizes = tf.reduce_sum(tf.to_float(y_i), axis=1)
    y_i_bar_sizes = tf.reduce_sum(tf.to_float(y_i_bar), axis=1)
    normalizers = tf.multiply(y_i_sizes, y_i_bar_sizes)
    results = tf.divide(sums, normalizers)

    # sum over samples
    return tf.reduce_sum(results)

# compute pairwise differences between elements of the tensors a and b
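(The pairwise_and and pairwise_sub helpers are defined elsewhere in the project source.) As a hedged usage sketch: bp_mll_loss has the (y_true, y_pred) signature Keras expects from a custom loss, so it should drop straight into model.compile; the model below is hypothetical.

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical multi-label model: one sigmoid output per label.
model = Sequential([
    Dense(64, activation='relu', input_shape=(100,)),
    Dense(10, activation='sigmoid'),
])
model.compile(optimizer='adam', loss=bp_mll_loss)  # assumes bp_mll_loss is defined/imported as above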
Project: main_loop_tf    Author: fvisin    | Project source | File source
def apply_loss(labels, net_out, loss_fn, weight_decay, is_training,
               return_mean_loss=False, mask_voids=True):
    '''Applies the user-specified loss function and returns the loss

    Note:
        SoftmaxCrossEntropyWithLogits expects labels NOT to be one-hot
        and net_out to be one-hot.
    '''

    cfg = gflags.cfg

    if mask_voids and len(cfg.void_labels):
        # TODO Check this
        print('Masking the void labels')
        mask = tf.not_equal(labels, cfg.void_labels)
        labels *= tf.cast(mask, 'int32')  # void_class --> 0 (random class)
        # Train loss
        loss = loss_fn(labels=labels,
                       logits=tf.reshape(net_out, [-1, cfg.nclasses]))
        mask = tf.cast(mask, 'float32')
        loss *= mask
    else:
        # Train loss
        loss = loss_fn(labels=labels,
                       logits=tf.reshape(net_out, [-1, cfg.nclasses]))

    if is_training:
        loss = apply_l2_penalty(loss, weight_decay)

    # Return the mean loss (over pixels *and* batches)
    if return_mean_loss:
        if mask_voids and len(cfg.void_labels):
            return tf.reduce_sum(loss) / tf.reduce_sum(mask)
        else:
            return tf.reduce_mean(loss)
    else:
        return loss
Project: fold    Author: tensorflow    | Project source | File source
def _instantiate_subnet(self, batch, block_idx, seq_prefix):
    def zeros_fn():
      return tf.zeros_like(batch)
    def base_case_fn():
      return self._children[block_idx, seq_prefix](batch)
    def recursive_case_fn():
      first_subnet = self._instantiate_subnet(
          batch, block_idx, seq_prefix + (0,))
      return self._instantiate_subnet(
          first_subnet, block_idx, seq_prefix + (1,))
    if len(seq_prefix) == self._fractal_block_depth:
      return base_case_fn()
    else:
      choice = self._drop_path_choices[self._choice_id[(block_idx, seq_prefix)]]
      base_case = tf.cond(
          tf.not_equal(choice, self._JUST_RECURSE), base_case_fn, zeros_fn)
      base_case.set_shape(batch.get_shape())
      recursive_case = tf.cond(
          tf.not_equal(choice, self._JUST_BASE), recursive_case_fn, zeros_fn)
      recursive_case.set_shape(batch.get_shape())
      cases = [
          (tf.equal(choice, self._BOTH),
           lambda: self._mixer(base_case, recursive_case)),
          (tf.equal(choice, self._JUST_BASE), lambda: base_case),
          (tf.equal(choice, self._JUST_RECURSE), lambda: recursive_case)]
      result = tf.case(cases, lambda: base_case)
      result.set_shape(batch.get_shape())
      return result
Project: keras    Author: GeekLiB    | Project source | File source
def not_equal(x, y):
    '''Element-wise inequality between two tensors.
    Returns a bool tensor.
    '''
    return tf.not_equal(x, y)
Project: seq2seq    Author: eske    | Project source | File source
def get_weights(sequence, eos_id, include_first_eos=True):
    cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
    range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
    range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
    weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))

    if include_first_eos:
        weights = weights[:,:-1]
        shape = [tf.shape(weights)[0], 1]
        weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)

    return tf.stop_gradient(weights)
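A toy trace of get_weights (values made up, assuming sequences are padded with the EOS id): the weights are 1 up to and including the first EOS and 0 afterwards.

import tensorflow as tf

sequence = tf.constant([[3, 7, 2, 2, 2],
                        [5, 2, 2, 2, 2]])   # eos_id = 2

with tf.Session() as sess:
    print(sess.run(get_weights(sequence, eos_id=2)))
    # [[1. 1. 1. 0. 0.]
    #  [1. 1. 0. 0. 0.]]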
Project: tfzip    Author: gstaff    | Project source | File source
def print_mask_parameter_counts():
    print("# Mask Parameter Counts")
    print("  - Mask1: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix1, tf.zeros_like(indicator_matrix1)))))))
    print("  - Mask2: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix2, tf.zeros_like(indicator_matrix2)))))))
    print("  - Mask3: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix3, tf.zeros_like(indicator_matrix3)))))))
Project: keraflow    Author: ipod825    | Project source | File source
def not_equal(self, x, y):
        '''Element-wise inequality between two tensors.
        Returns a bool tensor.
        '''
        return tf.not_equal(x, y)
Project: text-gan-tensorflow    Author: tokestermw    | Project source | File source
def preprocess(data):
    # PaddingFIFOQueue pads to the max size seen in the whole data (not just the minibatch);
    # trimming each batch to its own max length avoids redundant computation in the output layer
    sequence_length = tf.reduce_sum(tf.cast(tf.not_equal(data, 0), dtype=tf.int32), axis=1)
    maximum_sequence_length = tf.reduce_max(sequence_length)
    data = data[:, :maximum_sequence_length] 

    source = data[:, :-1]
    target = data[:, 1:]
    sequence_length -= 1
    return source, target, sequence_length
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def not_equal(x, y):
    """Element-wise inequality between two tensors.

    # Returns
        A bool tensor.
    """
    return tf.not_equal(x, y)
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def error(self):
        mistakes = tf.not_equal(
            tf.argmax(self._target, 2), tf.argmax(self.prediction, 2))
        mistakes = tf.cast(mistakes, tf.float32)
        mask = tf.sign(tf.reduce_max(self._target, reduction_indices=2))
        mistakes *= mask
        # Average over actual sequence lengths.
        mistakes = tf.reduce_sum(mistakes, reduction_indices=1)
        mistakes /= tf.cast(self._length, tf.float32)
        return tf.reduce_mean(mistakes)
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def num_of_error(self):
        mistakes = tf.not_equal(
            tf.argmax(self._target, 2), tf.argmax(self.prediction, 2))
        mistakes = tf.cast(mistakes, tf.float32)
        mask = tf.sign(tf.reduce_max(self._target, reduction_indices=2))
        mistakes *= mask
        # Sum errors over each sequence (no averaging here).
        mistakes = tf.reduce_sum(mistakes, reduction_indices=1)
        return mistakes
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def seg_num_of_error(self):
        mistakes = tf.not_equal(
            tf.argmax(self._target, 2), tf.argmax(self.seg_prediction, 2))
        mistakes = tf.cast(mistakes, tf.float32)
        mask = tf.sign(tf.reduce_max(self._target, reduction_indices=2))
        mistakes *= mask
        # Total error count over the whole batch.
        mistakes = tf.reduce_sum(mistakes)
        return mistakes
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def pos_num_of_error(self):
        mistakes = tf.not_equal(
            tf.argmax(self._pos, 2), tf.argmax(self.pos_prediction, 2))
        mistakes = tf.cast(mistakes, tf.float32)
        mask = tf.sign(tf.reduce_max(self._pos, reduction_indices=2))
        mistakes *= mask
        # Error count per sequence, summed over the time steps.
        mistakes = tf.reduce_sum(mistakes, reduction_indices=1)
        return mistakes
Project: LSTM-TensorSpark    Author: EmanuelOverflow    | Project source | File source
def compute_error(self, test_state, target, s=None):
        prediction = self.predict(test_state, s)
        # mark samples whose predicted class differs from the target class
        incorrects = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
        # mean of the 0/1 mistake indicators gives the batch error rate
        return tf.reduce_mean(tf.cast(incorrects, tf.float32))
Project: tefla    Author: openAGI    | Project source | File source
def char_accuracy(predictions, targets, rej_char, streaming=False):
    """Computes character level accuracy.
    Both predictions and targets should have the same shape
    [batch_size x seq_length].

    Args:
        predictions: predicted characters ids.
        targets: ground truth character ids.
        rej_char: the character id used to mark an empty element (end of sequence).
        streaming: if True, uses the streaming mean from the slim.metric module.

    Returns:
        an update op for execution and a value tensor whose value on
            evaluation returns the total character accuracy.
    """
    with tf.variable_scope('CharAccuracy'):
        predictions.get_shape().assert_is_compatible_with(targets.get_shape())

        targets = tf.to_int32(targets)
        const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
        weights = tf.to_float(tf.not_equal(targets, const_rej_char))
        correct_chars = tf.to_float(tf.equal(predictions, targets))
        accuracy_per_example = tf.div(tf.reduce_sum(tf.multiply(
            correct_chars, weights), 1),  tf.reduce_sum(weights, 1))
        if streaming:
            return tf.contrib.metrics.streaming_mean(accuracy_per_example)
        else:
            return tf.reduce_mean(accuracy_per_example)
Project: tefla    Author: openAGI    | Project source | File source
def sequence_accuracy(predictions, targets, rej_char, streaming=False):
    """Computes sequence level accuracy.
    Both input tensors should have the same shape: [batch_size x seq_length].

    Args:
        predictions: predicted character classes.
        targets: ground truth character classes.
        rej_char: the character id used to mark empty element (end of sequence).
        streaming: if True, uses the streaming mean from the slim.metric module.

    Returns:
        an update op for execution and a value tensor whose value on
            evaluation returns the total sequence accuracy.
    """

    with tf.variable_scope('SequenceAccuracy'):
        predictions.get_shape().assert_is_compatible_with(targets.get_shape())

        targets = tf.to_int32(targets)
        const_rej_char = tf.constant(
            rej_char, shape=targets.get_shape(), dtype=tf.int32)
        include_mask = tf.not_equal(targets, const_rej_char)
        include_predictions = tf.to_int32(
            tf.where(include_mask, predictions, tf.zeros_like(predictions) + rej_char))
        correct_chars = tf.to_float(tf.equal(include_predictions, targets))
        correct_chars_counts = tf.cast(
            tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32)
        target_length = targets.get_shape().dims[1].value
        target_chars_counts = tf.constant(
            target_length, shape=correct_chars_counts.get_shape())
        accuracy_per_example = tf.to_float(
            tf.equal(correct_chars_counts, target_chars_counts))
        if streaming:
            return tf.contrib.metrics.streaming_mean(accuracy_per_example)
        else:
            return tf.reduce_mean(accuracy_per_example)
Project: neural-chat    Author: henriblancke    | Project source | File source
def decode_sparse(self, include_stop_tokens=True):
        dense_symbols, logprobs = self.decode_dense()
        mask = tf.not_equal(dense_symbols, self.stop_token)
        if include_stop_tokens:
            mask = tf.concat(1, [tf.ones_like(mask[:, :1]), mask[:, :-1]])
        return sparse_boolean_mask(dense_symbols, mask), logprobs
Project: lsdc    Author: febert    | Project source | File source
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
Project: keras-customized    Author: ambrite    | Project source | File source
def not_equal(x, y):
    '''Element-wise inequality between two tensors.
    Returns a bool tensor.
    '''
    return tf.not_equal(x, y)
Project: chars2word2vec    Author: ilya-shenbin    | Project source | File source
def w2v_error(self):
        # mistakes = tf.not_equal(tf.argmax(self.target, 1), tf.argmax(self.encoder, 1))
        # return tf.reduce_mean(tf.cast(mistakes, tf.float32))
        y_true = tf.nn.l2_normalize(self.target, dim=-1)
        y_pred = tf.nn.l2_normalize(self.w2v_predictor, dim=-1)
        return -tf.reduce_mean(y_true * y_pred)
Project: deepsleepnet    Author: akaraspt    | Project source | File source
def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return a tensor of sequence lengths; also works if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
Project: deepsleepnet    Author: akaraspt    | Project source | File source
def target_mask_op(data, pad_val=0):        # HangSheng: return a mask tensor; also works if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))


# Dynamic RNN
Project: DBQA    Author: nanfeng1101    | Project source | File source
def errors(self, y):
        return tf.reduce_mean(tf.cast(tf.not_equal(self.y_pred, tf.arg_max(y,1)), dtype=tf.float32))
Project: npfl114    Author: ufal    | Project source | File source
def __init__(self, logdir, experiment, threads):
        # Construct the graph
        with tf.name_scope("inputs"):
            self.images = tf.placeholder(tf.float32, [None, WIDTH, HEIGHT, 1], name="images")
            self.labels = tf.placeholder(tf.int64, [None], name="labels")
            flattened_images = layers.flatten(self.images)

        hidden_layer = layers.fully_connected(flattened_images, num_outputs=HIDDEN, activation_fn=tf.nn.relu, scope="hidden_layer")
        output_layer = layers.fully_connected(hidden_layer, num_outputs=LABELS, activation_fn=None, scope="output_layer")

        loss = losses.sparse_softmax_cross_entropy(output_layer, self.labels, scope="loss")
        self.training = layers.optimize_loss(loss, None, None, tf.train.AdamOptimizer(), summaries=['loss', 'gradients', 'gradient_norm'], name='training')

        with tf.name_scope("accuracy"):
            predictions = tf.argmax(output_layer, 1, name="predictions")
            accuracy = metrics.accuracy(predictions, self.labels)
            tf.scalar_summary("training/accuracy", accuracy)

        with tf.name_scope("confusion_matrix"):
            confusion_matrix = metrics.confusion_matrix(predictions, self.labels, weights=tf.not_equal(predictions, self.labels), dtype=tf.float32)
            confusion_image = tf.reshape(confusion_matrix, [1, LABELS, LABELS, 1])

        # Summaries
        self.summaries = {'training': tf.merge_all_summaries() }
        for dataset in ["dev", "test"]:
            self.summaries[dataset] = tf.merge_summary([tf.scalar_summary(dataset + "/accuracy", accuracy),
                                                        tf.image_summary(dataset + "/confusion_matrix", confusion_image)])

        # Create the session
        self.session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                        intra_op_parallelism_threads=threads))

        self.session.run(tf.initialize_all_variables())
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, experiment), graph=self.session.graph, flush_secs=10)
        self.steps = 0
Project: lda2vec-tf    Author: meereeum    | Project source | File source
def __call__(self, embed, train_labels):

        with tf.name_scope("negative_sampling"):
            # mask out skip or OOV
            # if switched on, this yields ...
            # UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.

            # mask = tf.greater(train_labels, NegativeSampling.IGNORE_LABEL_MAX)
            # # mask = tf.not_equal(train_labels, NegativeSampling.IGNORE_LABEL)
            # embed = tf.boolean_mask(embed, mask)
            # train_labels = tf.expand_dims(tf.boolean_mask(train_labels, mask), -1)
            train_labels = tf.expand_dims(train_labels, -1)

            # Compute the average NCE loss for the batch.
            # tf.nce_loss automatically draws a new sample of the negative labels each
            # time we evaluate the loss.
            # By default this uses a log-uniform (Zipfian) distribution for sampling
            # and therefore assumes labels are sorted - which they are!

            sampler = (None if self.freqs is None # None lets nce_loss use its default log-uniform sampler
                       else tf.nn.fixed_unigram_candidate_sampler(
                               train_labels, num_true=1, num_sampled=self.sample_size,
                               unique=True, range_max=self.vocab_size,
                               #num_reserved_ids=2, # skip or OoV
                               # ^ only if not in unigrams
                               distortion=self.power, unigrams=list(self.freqs)))

            loss = tf.reduce_mean(
                    tf.nn.nce_loss(self.nce_weights, self.nce_biases,
                                   embed, # summed doc and context embedding
                                   train_labels, self.sample_size, self.vocab_size,
                                   sampled_values=sampler), # log-uniform if not specified
                    name="nce_batch_loss")
            # TODO negative sampling versus NCE
            # TODO uniform vs. Zipf with exponent `distortion` param
            #https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#log_uniform_candidate_sampler

        return loss
Project: tensorlayer-chinese    Author: shorxp    | Project source | File source
def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return a tensor of sequence lengths; also works if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    elif data_shape_size == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
Project: tensorlayer-chinese    Author: shorxp    | Project source | File source
def target_mask_op(data, pad_val=0):        # HangSheng: return a mask tensor; also works if input is tf.string
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))


# Dynamic RNN
Project: TikZ    Author: ellisk42    | Project source | File source
def accuracy(self):
        a = tf.equal(self.hard,self.targetPlaceholder)
        for decoder in self.decoders:
            if decoder.token != STOP:
                vector = decoder.accuracyVector()
                if vector is not True:
                    a = tf.logical_and(a,
                                       tf.logical_or(vector, tf.not_equal(self.hard, decoder.token)))
        return tf.reduce_mean(tf.cast(a, tf.float32))
Project: RecursiveNN    Author: sapruash    | Project source | File source
def add_embedding(self):

        #embed=np.load('glove{0}_uniform.npy'.format(self.emb_dim))
        with tf.variable_scope("Embed",regularizer=None):
            embedding=tf.get_variable('embedding',[self.num_emb,
                                                   self.emb_dim]
                        ,initializer=tf.random_uniform_initializer(-0.05,0.05),trainable=True,regularizer=None)
            ix=tf.to_int32(tf.not_equal(self.input,-1))*self.input
            emb_tree=tf.nn.embedding_lookup(embedding,ix)
            emb_tree=emb_tree*(tf.expand_dims(
                        tf.to_float(tf.not_equal(self.input,-1)),2))

            return emb_tree