Python sklearn.metrics module: hamming_loss() code examples

The following 4 code examples, extracted from open-source Python projects, illustrate how to use sklearn.metrics.hamming_loss().

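Before the project examples, here is a minimal usage sketch of the call itself (toy labels chosen only for illustration; not taken from any of the projects below):

# Minimal usage sketch with toy multilabel indicator matrices.
import numpy as np
from sklearn.metrics import hamming_loss

y_true = np.array([[0, 1, 1], [1, 0, 1]])
y_pred = np.array([[0, 0, 1], [1, 0, 1]])

# Exactly one of the six label entries differs, so the Hamming loss is 1/6.
print(hamming_loss(y_true, y_pred))  # 0.1666...
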
Project: Parallel-SGD    Author: angadgill    | Project source | File source
# Imports as in the original test module (assumed; the function is an
# extracted scikit-learn metrics test):
import numpy as np
from scipy.spatial.distance import hamming as sp_hamming
from sklearn.metrics import hamming_loss
from sklearn.utils.testing import assert_equal


def test_multilabel_hamming_loss():
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    w = np.array([1, 3])

    assert_equal(hamming_loss(y1, y2), 1 / 6)
    assert_equal(hamming_loss(y1, y1), 0)
    assert_equal(hamming_loss(y2, y2), 0)
    assert_equal(hamming_loss(y2, 1 - y2), 1)
    assert_equal(hamming_loss(y1, 1 - y1), 1)
    assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
    assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
    assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
    assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
    assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
    # sp_hamming only works with 1-D arrays
    assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
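
To see where the 1/12 in the weighted assertion comes from, the weighting can be reproduced by hand (a sketch using the same y1, y2 and w as in the test above; the formula mirrors how scikit-learn weights per-sample mismatches):

import numpy as np
from sklearn.metrics import hamming_loss

y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])

# Per-sample mismatch counts are [1, 0]; weighted sum = 1*1 + 3*0 = 1.
# Denominator = n_labels * sum(w) = 3 * 4 = 12, hence 1/12.
manual = (w * (y1 != y2).sum(axis=1)).sum() / (y1.shape[1] * w.sum())
assert np.isclose(manual, hamming_loss(y1, y2, sample_weight=w))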
Project: Rnews    Author: suemi994    | Project source | File source
def diversity(self):
    # Requires `from sklearn.metrics import hamming_loss` at module level;
    # `self.provider` is a project-specific data source.
    rec = self.provider.provideAll()
    data = self.provider.provideIndexMatrix()
    count = len(data)
    distance = 0.0
    # Sum the Hamming loss over all unordered pairs of index vectors...
    for i in range(count):
        for j in range(i + 1, count):
            distance += hamming_loss(data[i], data[j])
    # ...and normalise by the number of pairs, count * (count - 1) / 2.
    return 2 * distance / (count * (count - 1))
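
The provider object above is specific to the Rnews project. The same average pairwise Hamming loss can be sketched on a plain 0/1 indicator matrix (recommendation_matrix below is hypothetical toy data):

import numpy as np
from sklearn.metrics import hamming_loss

# Toy matrix: one row of recommended-item indicators per user (made-up data).
recommendation_matrix = np.array([[1, 0, 1, 0],
                                  [1, 1, 0, 0],
                                  [0, 0, 1, 1]])

count = len(recommendation_matrix)
distance = sum(hamming_loss(recommendation_matrix[i], recommendation_matrix[j])
               for i in range(count) for j in range(i + 1, count))
diversity = 2 * distance / (count * (count - 1))  # mean pairwise Hamming loss
print(diversity)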
Project: DeepMIML    Author: kingfengji    | Project source | File source
# Imports assumed from the original module:
import numpy as np
from sklearn.metrics import f1_score, hamming_loss, average_precision_score


def evaluate(classes, y_gt, y_pred, threshold_value=0.5):
    """
    Arguments:
        y_gt (num_bag x L): ground truth
        y_pred (num_bag x L): prediction
    """
    print("thresh = {:.6f}".format(threshold_value))

    y_pred_bin = y_pred >= threshold_value

    score_f1_macro = f1_score(y_gt, y_pred_bin, average="macro")
    print("Macro f1_score = {:.6f}".format(score_f1_macro))

    score_f1_micro = f1_score(y_gt, y_pred_bin, average="micro")
    print("Micro f1_score = {:.6f}".format(score_f1_micro))

    # hamming loss
    h_loss = hamming_loss(y_gt, y_pred_bin)
    print("Hamming Loss = {:.6f}".format(h_loss))

    mAP = average_precision_score(y_gt, y_pred)
    print("mAP = {:.2f}%".format(mAP * 100))
    # ap_classes = []
    # for i, cls in enumerate(classes):
    #     ap_cls = average_precision_score(y_gt[:, i], y_pred[:, i])
    #     ap_classes.append(ap_cls)
    #     print("AP({}) = {:.2f}%".format(cls, ap_cls * 100))
    # print("mAP = {:.2f}%".format(np.mean(ap_classes) * 100))
Project: Neural-Architecture-Search-with-RL    Author: dhruvramani    | Project source | File source
# Imports assumed from the original module (BAE, patk and bipartition_scores
# are project-specific helpers defined elsewhere in the repository):
import numpy as np
from sklearn.metrics import (accuracy_score, precision_recall_fscore_support,
                             coverage_error, label_ranking_average_precision_score,
                             label_ranking_loss)


def evaluate(predictions, labels, threshold=0.4, multi_label=True):
    '''
        True Positive  :  Label : 1, Prediction : 1
        False Positive :  Label : 0, Prediction : 1
        False Negative :  Label : 1, Prediction : 0
        True Negative  :  Label : 0, Prediction : 0
        Precision      :  TP/(TP + FP)
        Recall         :  TP/(TP + FN)
        F Score        :  2.P.R/(P + R)
        Ranking Loss   :  The average number of label pairs that are incorrectly ordered given the predictions
        Hamming Loss   :  The fraction of labels that are incorrectly predicted (the Hamming distance between predictions and labels)
    '''
    assert predictions.shape == labels.shape, "Shapes: %s, %s" % (predictions.shape, labels.shape,)
    metrics = dict()
    if not multi_label:
        metrics['bae'] = BAE(labels, predictions)
        labels, predictions = np.argmax(labels, axis=1), np.argmax(predictions, axis=1)

        metrics['accuracy'] = accuracy_score(labels, predictions)
        metrics['micro_precision'], metrics['micro_recall'], metrics['micro_f1'], _ = \
            precision_recall_fscore_support(labels, predictions, average='micro')
        metrics['macro_precision'], metrics['macro_recall'], metrics['macro_f1'], metrics['coverage'], \
            metrics['average_precision'], metrics['ranking_loss'], metrics['pak'], metrics['hamming_loss'] \
            = 0, 0, 0, 0, 0, 0, 0, 0

    else:
        metrics['coverage'] = coverage_error(labels, predictions)
        metrics['average_precision'] = label_ranking_average_precision_score(labels, predictions)
        metrics['ranking_loss'] = label_ranking_loss(labels, predictions)

        for i in range(predictions.shape[0]):
            predictions[i, :][predictions[i, :] >= threshold] = 1
            predictions[i, :][predictions[i, :] < threshold] = 0

        metrics['bae'] = 0
        metrics['patk'] = patk(predictions, labels)
        metrics['micro_precision'], metrics['micro_recall'], metrics['micro_f1'], metrics['macro_precision'], \
            metrics['macro_recall'], metrics['macro_f1'] = bipartition_scores(labels, predictions)
    return metrics
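
BAE, patk and bipartition_scores are helpers defined elsewhere in that repository. The scikit-learn side of the multi-label branch can be sketched in isolation on toy data:

import numpy as np
from sklearn.metrics import (coverage_error, label_ranking_loss,
                             label_ranking_average_precision_score, hamming_loss)

labels = np.array([[1, 0, 1], [0, 1, 0]])               # toy ground-truth labels
scores = np.array([[0.8, 0.3, 0.6], [0.2, 0.9, 0.5]])   # toy prediction scores

# Ranking-based metrics operate on the raw scores.
print(coverage_error(labels, scores))
print(label_ranking_loss(labels, scores))
print(label_ranking_average_precision_score(labels, scores))

# Threshold the scores (as the function above does) before the Hamming loss.
predictions = (scores >= 0.4).astype(int)
print(hamming_loss(labels, predictions))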