Python keras.backend module: equal() code examples

We extracted the following 29 code examples from open-source Python projects to illustrate how to use keras.backend.equal(). The snippets assume the usual imports: import keras.backend as K and, where TensorFlow ops appear, import tensorflow as tf.
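Before the project examples, a minimal sketch of the basic pattern (variable names are ours): K.equal compares two tensors elementwise and returns a boolean tensor, which normally has to be cast to a float type before it can be summed or averaged.

import numpy as np
import keras.backend as K

a = K.variable(np.array([0., 1., 1., 0.]))
b = K.variable(np.array([0., 1., 0., 1.]))

# K.equal returns a boolean tensor; cast it before doing arithmetic
matches = K.cast(K.equal(a, b), K.floatx())
print(K.eval(K.mean(matches)))  # 0.5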

Project: Keras-FCN    Author: theduynguyen
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape so that the w and h dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1]*s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1]*s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped, axis=1)

    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)  # classes absent from the batch give 0/0 = NaN
    acc_masked = tf.boolean_mask(acc, acc_mask)

    return K.mean(acc_masked)
Project: Keras-FCN    Author: theduynguyen
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape so that the w and h dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1]*s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1]*s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped, axis=1)

    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)  # classes absent from the batch give 0/0 = NaN
    iou_masked = tf.boolean_mask(iou, iou_mask)

    return K.mean(iou_masked)
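Both functions above are plain (y_true, y_pred) metrics, so they can be passed straight to model.compile; a hypothetical usage sketch (model construction omitted):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[mean_acc, mean_IoU])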
Project: deepcpg    Author: cangermueller
def contingency_table(y, z):
    """Compute contingency table."""
    y = K.round(y)
    z = K.round(z)

    def count_matches(a, b):
        tmp = K.concatenate([a, b])
        return K.sum(K.cast(K.all(tmp, -1), K.floatx()))

    ones = K.ones_like(y)
    zeros = K.zeros_like(y)
    y_ones = K.equal(y, ones)
    y_zeros = K.equal(y, zeros)
    z_ones = K.equal(z, ones)
    z_zeros = K.equal(z, zeros)

    tp = count_matches(y_ones, z_ones)
    tn = count_matches(y_zeros, z_zeros)
    fp = count_matches(y_zeros, z_ones)
    fn = count_matches(y_ones, z_zeros)

    return (tp, tn, fp, fn)
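The returned counts are building blocks for scalar metrics; a sketch of an F1 score built on top of the table (function name ours, with K.epsilon() guarding against division by zero):

def f1(y, z):
    tp, tn, fp, fn = contingency_table(y, z)
    return 2 * tp / (2 * tp + fp + fn + K.epsilon())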
Project: deep-mlsa    Author: spinningbytes
def f1_score_taskB(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return the f1 score for each class
    return f1_class
Project: deep-mlsa    Author: spinningbytes
def precision_keras(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
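Note that item assignment on a symbolic tensor, as in y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1 above, is not actually supported by either backend; later examples on this page build the one-hot prediction matrix with tf.SparseTensor instead. When the number of classes is known statically, a simpler sketch is:

nb_classes = K.int_shape(y_true)[-1]  # assumes a static class dimension
y_pred_ones = K.one_hot(K.argmax(y_pred, axis=-1), nb_classes)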
Project: onto-lstm    Author: pdasigi
def get_split_averages(input_tensor, input_mask, indices):
        # Splits input tensor into three parts based on the indices and
        # returns average of values prior to index, values at the index and
        # average of values after the index.
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (`switch` below is presumably a project helper that applies K.switch elementwise.)
        # (batch_size, input_length)
        if input_mask is not None:
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32')
Project: keras-fractalnet    Author: snf
def _build_global_switch(self):
        # A randomly sampled tensor that will signal if the batch
        # should use global or local droppath
        return K.equal(K.random_binomial((), p=self.global_p, seed=self.switch_seed), 1.)
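The returned boolean scalar is typically consumed by K.switch to pick one of two alternative tensors for the whole batch; a hypothetical sketch (tensor names are ours):

# choose the global-droppath output or the local one for this batch
output = K.switch(self._build_global_switch(), global_path_output, local_path_output)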
Project: segmentation_DLMI    Author: imatge-upc
def accuracy(y_true, y_pred):
    return K.mean(K.equal(K.argmax(y_true, axis=4), K.argmax(y_pred, axis=4)))
Project: segmentation_DLMI    Author: imatge-upc
def accuracy_mask(mask):
    def accuracy(y_true, y_pred):
        # cast the boolean result of K.equal before multiplying by the mask
        intersection = K.cast(K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)), K.floatx())
        intersection_masked = mask * intersection
        return 1.0 * K.sum(intersection_masked) / K.sum(mask)

    return accuracy
Project: SNLI-Keras    Author: adamzjk
def w_categorical_crossentropy(y_true, y_pred):
  # assumes module-level imports: import numpy as np; from itertools import product
  weights = np.array([[1., 5.],    # misclassify N -> Y
                      [10., 1.]])  # misclassify Y -> N
  nb_cl = len(weights)
  final_mask = K.zeros_like(y_pred[:, 0])
  y_pred_max = K.max(y_pred, axis=1)
  y_pred_max = K.expand_dims(y_pred_max, 1)
  y_pred_max_mat = K.equal(y_pred, y_pred_max)
  for c_p, c_t in product(range(nb_cl), range(nb_cl)):
    final_mask += (K.cast(weights[c_t, c_p], K.floatx()) *
                   K.cast(y_pred_max_mat[:, c_p], K.floatx()) *
                   K.cast(y_true[:, c_t], K.floatx()))
  return K.categorical_crossentropy(y_pred, y_true) * final_mask
Project: bp-mll-tensorflow    Author: vanHavel
def bp_mll_loss(y_true, y_pred):

    # get true and false labels
    y_i = K.equal(y_true, K.ones_like(y_true))
    y_i_bar = K.not_equal(y_true, K.ones_like(y_true))

    # cast to float as keras backend has no logical and
    y_i = K.cast(y_i, dtype='float32')
    y_i_bar = K.cast(y_i_bar, dtype='float32')

    # get indices to check
    truth_matrix = pairwise_and(y_i, y_i_bar)

    # calculate all exp'd differences
    sub_matrix = pairwise_sub(y_pred, y_pred)
    exp_matrix = K.exp(-sub_matrix)

    # check which differences to consider and sum them
    sparse_matrix = exp_matrix * truth_matrix
    sums = K.sum(sparse_matrix, axis=[1,2])

    # get normalizing terms and apply them
    y_i_sizes = K.sum(y_i, axis=1)
    y_i_bar_sizes = K.sum(y_i_bar, axis=1)
    normalizers = y_i_sizes * y_i_bar_sizes
    results = sums / normalizers

    # sum over samples
    return K.sum(results)

# compute pairwise differences between elements of the tensors a and b
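The helpers used above are not part of this excerpt; a sketch of what they compute, inferred from how bp_mll_loss uses them (implementations ours):

def pairwise_sub(a, b):
    # a[:, i] - b[:, j] for every pair (i, j) -> (batch, n, n)
    column = K.expand_dims(a, 2)
    row = K.expand_dims(b, 1)
    return column - row

def pairwise_and(a, b):
    # a[:, i] * b[:, j] for every pair (i, j) -> (batch, n, n)
    column = K.expand_dims(a, 2)
    row = K.expand_dims(b, 1)
    return column * row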
Project: DeepLearning-OCR    Author: xingjian-f
def categorical_accuracy_per_sequence(y_true, y_pred):
    # K.min over the time axis is 1 only when every timestep matches,
    # i.e. this measures whole-sequence accuracy
    return K.mean(K.min(K.equal(K.argmax(y_true, axis=-1),
                                K.argmax(y_pred, axis=-1)), axis=-1))
Project: semantic-tagging    Author: bjerva
def actual_accuracy(act, pred):
    '''
    Calculate accuracy for each batch.
    Keras' standard calculation factors in our padding classes. We don't.
    FIXME: Not always working
    '''
    act_argm  = K.argmax(act, axis=-1)   # Indices of act. classes
    pred_argm = K.argmax(pred, axis=-1)  # Indices of pred. classes

    incorrect = K.cast(K.not_equal(act_argm, pred_argm), dtype='float32')
    correct   = K.cast(K.equal(act_argm, pred_argm), dtype='float32')
    padding   = K.cast(K.equal(K.sum(act), 0), dtype='float32')
    start     = K.cast(K.equal(act_argm, 0), dtype='float32')
    end       = K.cast(K.equal(act_argm, 1), dtype='float32')

    pad_start     = K.maximum(padding, start)
    pad_start_end = K.maximum(pad_start, end) # 1 where pad, start or end

    # Subtract pad_start_end from correct, then check equality to 1
    # E.g.: act: [pad, pad, pad, <s>, tag, tag, tag, </s>]
    #      pred: [pad, tag, pad, <s>, tag, tag, err, </s>]
    #   correct: [1,     0,   1,   1,   1,   1,   0,    1]
    #     p_s_e: [1,     1,   1,   1,   0,   0,   0,    1]
    #  corr-pse: [0,    -1,   0,   0,   1,   1,   0,    0] # Subtraction
    # actu_corr: [0,     0,   0,   0,   1,   1,   0,    0] # Check equality to 1
    corr_preds   = K.sum(K.cast(K.equal(correct - pad_start_end, 1), dtype='float32'))
    incorr_preds = K.sum(K.cast(K.equal(incorrect - pad_start_end, 1), dtype='float32'))
    total = corr_preds + incorr_preds
    accuracy = corr_preds / total

    return accuracy
Project: pmet    Author: bkj
def lifted_loss(margin=1):
    """
      Lifted loss, per "Deep Metric Learning via Lifted Structured Feature Embedding" by Song et al
      Implemented in `keras`

      See also the `pytorch` implementation at: https://gist.github.com/bkj/565c5e145786cfd362cffdbd8c089cf4
    """
    def f(target, score):

        # Compute mask (-1 for different class, 1 for same class, 0 for diagonal)
        mask = (2 * K.equal(0, target - K.reshape(target, (-1, 1))) - 1)
        mask = (mask - K.eye(score.shape[0]))

        # Compute distance between rows
        mag  = (score ** 2).sum(axis=-1)
        mag  = K.tile(mag, (mag.shape[0], 1))
        dist = (mag + mag.T - 2 * score.dot(score.T))
        dist = K.sqrt(K.maximum(0, dist))

        # Negative component (points from different class should be far)
        l_n = K.sum((K.exp(margin - dist) * K.equal(mask, -1)), axis=-1)
        l_n = K.tile(l_n, (score.shape[0], 1))
        l_n = K.log(l_n + K.transpose(l_n))
        l_n = l_n * K.equal(mask, 1)

        # Positive component (points from same class should be close)
        l_p = dist * K.equal(mask, 1)

        loss  = K.sum((K.maximum(0, l_n + l_p) ** 2))
        n_pos = K.sum(K.equal(mask, 1))
        loss /= (2 * n_pos)

        return loss

    return f

# --
Project: Keras-FCN    Author: theduynguyen
def pixel_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape so that the w and h dims are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1]*s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1]*s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    correct_pixels_per_class = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32')

    return K.sum(correct_pixels_per_class) / K.cast(K.prod(s), dtype='float32')
Project: deepcpg    Author: cangermueller
def _sample_weights(y, mask=None):
    """Compute sample weights."""
    if mask is None:
        weights = K.ones_like(y)
    else:
        weights = 1 - K.cast(K.equal(y, mask), K.floatx())
    return weights
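A quick numeric check of the masking behaviour (values are ours): positions equal to the mask value get weight 0, everything else weight 1.

import numpy as np
import keras.backend as K

y = K.variable(np.array([0., 1., -1., 1.]))  # -1 marks unobserved entries
print(K.eval(_sample_weights(y, mask=-1)))   # [1. 1. 0. 1.]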
Project: deepcpg    Author: cangermueller
def _cat_sample_weights(y, mask=None):
    return 1 - K.cast(K.equal(K.sum(y, axis=-1), 0), K.floatx())
Project: deepcpg    Author: cangermueller
def cat_acc(y, z):
    """Compute categorical accuracy given one-hot matrices."""
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
Project: keras-attention    Author: datalogue
def all_acc(y_true, y_pred):
    """
        All Accuracy
        https://github.com/rasmusbergpalm/normalization/blob/master/train.py#L10
    """
    return K.mean(
        K.all(
            K.equal(
                K.max(y_true, axis=-1),
                K.cast(K.argmax(y_pred, axis=-1), K.floatx())
            ),
            axis=1)
    )
Project: neural-reading-comp    Author: tianwang95
def call(self, inputs, mask=None):
        assert isinstance(inputs, list)
        if mask:
            assert isinstance(mask, list) or isinstance(mask, tuple)
            if self.concat_axis == 2:
                # Note: K.equal returns a symbolic tensor, not a Python bool,
                # so this assert does not actually compare the mask values.
                assert K.equal(mask[0], mask[1])

        return K.concatenate(inputs, axis=self.concat_axis)
Project: neural-reading-comp    Author: tianwang95
def compute_mask(self, inputs, input_mask=None):
        if input_mask:
            assert(isinstance(input_mask, list) or isinstance(input_mask, tuple))
            if self.concat_axis == 2:
                assert(K.equal(input_mask[0], input_mask[1]))
                return input_mask[0]
            elif self.concat_axis == 1:
                return K.concatenate(input_mask, axis=1)
            else:
                return None
        else:
            return None
Project: neural-reading-comp    Author: tianwang95
def call(self, inputs, mask=None):
        """
        just does batch dot
        """
        assert K.equal(mask[0], mask[1])

        return K.batch_dot(inputs[0], inputs[1], axes=(1,1))
Project: deep-mlsa    Author: spinningbytes
def f1_score_keras(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='recall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='f1_class_semeval')

    # return average f1 score over all classes
    return K.mean(f1_class)
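tf.select was removed in TensorFlow 1.0; on newer TF the same guarded divisions can be written with tf.where, e.g. for the precision line (a sketch):

precision = tf.where(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred),
                     y_true_pred / pred_cnt, name='precision_f1_semeval')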
Project: deep-mlsa    Author: spinningbytes
def f1_score_semeval(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='recall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='f1_class_semeval')

    # return the average f1 of classes 0 and 2 only (the SemEval positive/negative average)
    return (f1_class[0] + f1_class[2]) / 2.0
Project: deep-mlsa    Author: spinningbytes
def f1_score_task3(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='recall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='f1_class_semeval')

    # return the f1 score of class 1 only
    return f1_class[1]
Project: deep-mlsa    Author: spinningbytes
def f1_score_task3(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)
    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return the f1 score of class 1 only
    return f1_class[1]
Project: Keras-FCN    Author: aurora95
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))

Project: onto-lstm    Author: pdasigi
def call(self, x, mask=None):
        # x[0]: (batch_size, input_length, input_dim)
        # x[1]: (batch_size, 1) indices of prepositions
        # Optional: x[2]: (batch_size, input_length - 2)
        assert isinstance(x, list) or isinstance(x, tuple)
        encoded_sentence = x[0]
        prep_indices = K.squeeze(x[1], axis=-1)  #(batch_size,)
        batch_indices = K.arange(K.shape(encoded_sentence)[0])  # (batch_size,)
        if self.with_attachment_probs:
            # We're essentially doing K.argmax(x[2]) here, but argmax is not differentiable!
            head_probs = x[2]
            head_probs_padding = K.zeros_like(x[2])[:, :2]  # (batch_size, 2)
            # (batch_size, input_length)
            padded_head_probs = K.concatenate([head_probs, head_probs_padding])
            # (batch_size, 1)
            max_head_probs = K.expand_dims(K.max(padded_head_probs, axis=1))
            # (batch_size, input_length, 1)
            max_head_prob_indices = K.expand_dims(K.equal(padded_head_probs, max_head_probs))
            # (batch_size, input_length, input_dim)
            masked_head_encoding = K.switch(max_head_prob_indices, encoded_sentence, K.zeros_like(encoded_sentence))
            # (batch_size, input_dim)
            head_encoding = K.sum(masked_head_encoding, axis=1)
        else:
            head_indices = prep_indices - 1  # (batch_size,)
            # advanced integer indexing like this works on the Theano backend;
            # TensorFlow would need tf.gather_nd here
            head_encoding = encoded_sentence[batch_indices, head_indices, :]  # (batch_size, input_dim)
        prep_encoding = encoded_sentence[batch_indices, prep_indices, :]  # (batch_size, input_dim)
        child_encoding = encoded_sentence[batch_indices, prep_indices+1, :]  # (batch_size, input_dim)
        '''
        prep_indices = x[1]
        sentence_mask = mask[0]
        if sentence_mask is not None:
            if K.ndim(sentence_mask) > 2:
                # This means this layer came after a Bidirectional layer. Keras has this bug which
                # concatenates input masks instead of output masks.
                # TODO: Fix Bidirectional instead.
                sentence_mask = K.any(sentence_mask, axis=(-2, -1))
        head_encoding, prep_encoding, child_encoding = self.get_split_averages(encoded_sentence, sentence_mask,
                                                                               prep_indices)
        '''
        head_projection = K.dot(head_encoding, self.proj_head)  # (batch_size, proj_dim)
        prep_projection = K.dot(prep_encoding, self.proj_prep)  # (batch_size, proj_dim)
        child_projection = K.dot(child_encoding, self.proj_child)  # (batch_size, proj_dim)
        #(batch_size, proj_dim)
        if self.composition_type == 'HPCT':
            composed_projection = K.tanh(head_projection + prep_projection + child_projection)
        elif self.composition_type == 'HPC':
            prep_child_projection = K.tanh(prep_projection + child_projection)  # (batch_size, proj_dim)
            composed_projection = K.tanh(head_projection + prep_child_projection)
        else:
            # Composition type is HC
            composed_projection = K.tanh(head_projection + child_projection)
        for hidden_layer in self.hidden_layers:
            composed_projection = K.tanh(K.dot(composed_projection, hidden_layer))  # (batch_size, proj_dim)
        # (batch_size, num_classes)
        class_scores = K.dot(composed_projection, self.scorer)
        label_probabilities = K.softmax(class_scores)
        return label_probabilities
Project: mcv-m5    Author: david-vazquez
def IoU(n_classes, void_labels):
    def IoU_flatt(y_true, y_pred):
        '''Expects a binary class matrix instead of a vector of scalar classes.'''
        # `dim_ordering`, T (theano.tensor) and tf are assumed to be imported at
        # module level, matching the active backend.
        if dim_ordering == 'th':
            y_pred = K.permute_dimensions(y_pred, (0, 2, 3, 1))
        shp_y_pred = K.shape(y_pred)
        y_pred = K.reshape(y_pred, (shp_y_pred[0]*shp_y_pred[1]*shp_y_pred[2],
                           shp_y_pred[3]))  # go back to b01,c
        # shp_y_true = K.shape(y_true)
        y_true = K.cast(K.flatten(y_true), 'int32')  # b,01 -> b01
        y_pred = K.argmax(y_pred, axis=-1)

        # We use not_void in case the prediction falls in the void class of
        # the groundtruth
        for i in range(len(void_labels)):
            if i == 0:
                not_void = K.not_equal(y_true, void_labels[i])
            else:
                not_void = not_void * K.not_equal(y_true, void_labels[i])

        sum_I = K.zeros((1,), dtype='float32')

        out = {}
        for i in range(n_classes):
            y_true_i = K.equal(y_true, i)
            y_pred_i = K.equal(y_pred, i)

            if dim_ordering == 'th':
                I_i = K.sum(y_true_i * y_pred_i)
                U_i = K.sum(T.or_(y_true_i, y_pred_i) * not_void)
                # I = T.set_subtensor(I[i], I_i)
                # U = T.set_subtensor(U[i], U_i)
                sum_I = sum_I + I_i
            else:
                U_i = K.sum(K.cast(tf.logical_and(tf.logical_or(y_true_i, y_pred_i), not_void), 'float32'))
                y_true_i = K.cast(y_true_i, 'float32')
                y_pred_i = K.cast(y_pred_i, 'float32')
                I_i = K.sum(y_true_i * y_pred_i)
                sum_I = sum_I + I_i
            out['I'+str(i)] = I_i
            out['U'+str(i)] = U_i

        if dim_ordering == 'th':
            accuracy = K.sum(sum_I) / K.sum(not_void)
        else:
            accuracy = K.sum(sum_I) / tf.reduce_sum(tf.cast(not_void, 'float32'))
        out['acc'] = accuracy
        return out
    return IoU_flatt