Python torch.nn module: KLDivLoss() example source code

We extracted the following 2 code examples from open-source Python projects to illustrate how to use torch.nn.KLDivLoss().
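Before the project examples, here is a minimal orientation sketch (not taken from either project) showing the calling convention of nn.KLDivLoss: the input must contain log-probabilities, while the target holds ordinary probabilities. The batch size, vocabulary size, and the reduction='batchmean' choice are illustrative assumptions for recent PyTorch releases.

import torch
import torch.nn as nn
import torch.nn.functional as F

# KLDivLoss expects log-probabilities as input and probabilities as target.
criterion = nn.KLDivLoss(reduction='batchmean')

logits = torch.randn(8, 100)                     # batch of 8 over a 100-word vocab
log_probs = F.log_softmax(logits, dim=-1)        # input: log-probabilities
target = F.softmax(torch.randn(8, 100), dim=-1)  # target: a probability distribution

loss = criterion(log_probs, target)              # scalar KL divergence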

Project: OpenNMT-py    Author: OpenNMT    | project source | file source
def __init__(self, generator, tgt_vocab, label_smoothing=0.0):
    super(NMTLossCompute, self).__init__(generator, tgt_vocab)

    # CHECK
    assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)
    # END CHECK

    if label_smoothing > 0:
        # When label smoothing is turned on,
        # KL-divergence between q_{smoothed ground truth prob.}(w)
        # and p_{prob. computed by model}(w) is minimized.
        # If label smoothing value is set to zero, the loss
        # is equivalent to NLLLoss or CrossEntropyLoss.
        # All non-true labels are uniformly set to low-confidence.
        self.criterion = nn.KLDivLoss(size_average=False)
        # Template row for the smoothed target distribution: every
        # non-true, non-padding token gets an equal share of the
        # smoothing mass, hence the division by len(tgt_vocab) - 2
        # (the true label and the padding token are excluded).
        one_hot = torch.randn(1, len(tgt_vocab))
        one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))
        one_hot[0][self.padding_idx] = 0
        self.register_buffer('one_hot', one_hot)
    else:
        weight = torch.ones(len(tgt_vocab))
        weight[self.padding_idx] = 0
        self.criterion = nn.NLLLoss(weight, size_average=False)
    self.confidence = 1.0 - label_smoothing
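
The comment in the snippet explains that training minimizes KL(q_smoothed || p_model). Below is a self-contained sketch of how the one_hot buffer built above would be expanded into that smoothed target distribution and fed to the criterion. It uses the current reduction='sum' spelling of size_average=False, and vocab_size, padding_idx, and the gold indices are made-up values, not code from OpenNMT-py.

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab_size, padding_idx, label_smoothing = 10, 0, 0.1  # illustrative values

# Template row: smoothing mass shared by all non-true, non-padding tokens.
one_hot = torch.full((1, vocab_size), label_smoothing / (vocab_size - 2))
one_hot[0][padding_idx] = 0

criterion = nn.KLDivLoss(reduction='sum')  # modern spelling of size_average=False

target = torch.tensor([3, 5])                              # gold token indices
q = one_hot.repeat(target.size(0), 1)                      # smoothed target q(w)
q.scatter_(1, target.unsqueeze(1), 1.0 - label_smoothing)  # confidence on gold

log_p = F.log_softmax(torch.randn(2, vocab_size), dim=-1)  # model log-probs p(w)
loss = criterion(log_p, q)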
Project: covfefe    Author: deepnn    | project source | file source
def kldiv_loss(loss_weight=None, size_ave=True):
    # Thin factory around nn.KLDivLoss, written against the older
    # PyTorch API in which the loss accepted `weight` and `size_average`;
    # current releases take a single `reduction` argument instead.
    return nn.KLDivLoss(weight=loss_weight, size_average=size_ave)
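
Since weight and size_average were removed from nn.KLDivLoss in later PyTorch releases, a present-day version of this factory would expose reduction instead. The sketch below, with the hypothetical name kldiv_loss_modern, shows the rough correspondence ('mean' for size_average=True, 'sum' for False).

import torch
import torch.nn as nn
import torch.nn.functional as F

def kldiv_loss_modern(reduction='mean'):
    # reduction='mean' roughly plays the role of size_average=True,
    # 'sum' that of size_average=False. Per-class weights are not
    # supported by nn.KLDivLoss itself in current PyTorch.
    return nn.KLDivLoss(reduction=reduction)

criterion = kldiv_loss_modern()
log_p = F.log_softmax(torch.randn(4, 10), dim=-1)
q = F.softmax(torch.randn(4, 10), dim=-1)
loss = criterion(log_p, q)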