Python torch.nn module: SmoothL1Loss() example source code

We extracted the following 4 code examples from open-source Python projects to illustrate how to use torch.nn.SmoothL1Loss().
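By default, SmoothL1Loss computes a Huber-style loss on each element x = input - target: 0.5 * x**2 when |x| < 1, and |x| - 0.5 otherwise, averaged over all elements. A minimal self-contained usage sketch (not taken from any of the projects below):

import torch
import torch.nn as nn

criterion = nn.SmoothL1Loss()                 # elementwise Huber loss, averaged
pred = torch.randn(8, 4, requires_grad=True)
target = torch.randn(8, 4)
loss = criterion(pred, target)                # scalar tensor
loss.backward()                               # gradients flow back into pred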

Project: action-detection    Author: yjxiong
import torch.nn as nn

class ClassWiseRegressionLoss(nn.Module):
    def __init__(self):
        super(ClassWiseRegressionLoss, self).__init__()
        self.smooth_l1_loss = nn.SmoothL1Loss()
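The excerpt shows only the constructor; how the criterion is applied is not part of it. A hypothetical forward() for a class-wise regression loss might gather each sample's regression output for its ground-truth class before applying smooth L1 (a sketch, not the project's actual code):

import torch
import torch.nn as nn

class ClassWiseRegressionLossSketch(nn.Module):
    def __init__(self):
        super(ClassWiseRegressionLossSketch, self).__init__()
        self.smooth_l1_loss = nn.SmoothL1Loss()

    def forward(self, pred, labels, targets):
        # pred: (N, num_classes, D) per-class regression outputs
        # labels: (N,) ground-truth class indices, targets: (N, D)
        idx = labels.view(-1, 1, 1).expand(-1, 1, pred.size(2))
        class_pred = pred.gather(1, idx).squeeze(1)   # (N, D)
        return self.smooth_l1_loss(class_pred, targets)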
Project: covfefe    Author: deepnn
import torch.nn as nn

def smoothl1_loss(size_ave=True):
    return nn.SmoothL1Loss(size_average=size_ave)
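size_average was deprecated in PyTorch 0.4 in favor of the reduction argument; an equivalent factory on current PyTorch would look like this (a sketch, not from the project):

import torch.nn as nn

def smoothl1_loss(size_ave=True):
    # reduction='mean' averages over elements, 'sum' adds them up
    return nn.SmoothL1Loss(reduction='mean' if size_ave else 'sum')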
Project: MachineLearning    Author: timomernick
def train():
    optimizer = optim.Adam(generator.parameters(), lr=learning_rate)
    loss = nn.SmoothL1Loss().cuda()  # Huber-style criterion on the GPU

    num_iterations = int(sampler.num_memories / batch_size)
    num_epochs = 100

    # Pre-0.4 PyTorch idiom: allocate GPU Variables once, refill them every batch
    music = Variable(torch.FloatTensor(batch_size, sampler.num_memory_channels, sampler.num_history, sampler.memory_size)).cuda()
    voice = Variable(torch.FloatTensor(batch_size, sampler.num_future_channels, sampler.future_size)).cuda()

    for epoch in range(num_epochs):
        for iteration in range(num_iterations):
            optimizer.zero_grad()

            music_cpu, voice_cpu = sampler.sample()
            music.data.resize_(music_cpu.size()).copy_(music_cpu)
            voice.data.resize_(voice_cpu.size()).copy_(voice_cpu)

            voice_gen = generator(music)
            voice_loss = loss(voice_gen, voice)

            total_loss = voice_loss

            total_loss.backward()

            optimizer.step()

            print("[{0}]: {1}".format(iteration, total_loss.data[0]))

        torch.save(generator.state_dict(), "model.pth")
        test()
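This loop targets pre-0.4 PyTorch (Variable wrappers, .data[0], in-place resize_/copy_). On current PyTorch the inner step simplifies considerably; a sketch assuming sampler.sample() still returns CPU tensors:

for epoch in range(num_epochs):
    for iteration in range(num_iterations):
        music_cpu, voice_cpu = sampler.sample()
        music = music_cpu.cuda(non_blocking=True)   # tensors are autograd-aware, no Variable needed
        voice = voice_cpu.cuda(non_blocking=True)

        optimizer.zero_grad()
        voice_loss = loss(generator(music), voice)
        voice_loss.backward()
        optimizer.step()

        print("[{0}]: {1}".format(iteration, voice_loss.item()))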
Project: MachineLearning    Author: timomernick
class FFTModel(nn.Module):
    def __init__(self):
        super(FFTModel, self).__init__()

        self.test_idx = 0
        self.test_frequencies, self.test_samples, self.test_ffts = get_batch(1)

        def init_conv(conv):
            conv.weight.data.normal_(0.0, 0.02)

        def init_linear(linear):
            linear.weight.data.normal_(0.0, 0.1)
            linear.bias.data.zero_()

        ndf = 1  # number of convolutional filters
        self.conv0 = nn.Conv1d(1, ndf, 9, bias=False)  # 1 input channel, kernel size 9
        init_conv(self.conv0)

        self.maxPool = nn.MaxPool1d(2)

        self.relu = nn.ReLU(inplace=True)

        self.fc0_size = 5135 * ndf  # flattened conv/pool output size for the fixed input length
        self.fc0 = nn.Linear(self.fc0_size, fft_size)

        init_linear(self.fc0)

        self.loss = nn.SmoothL1Loss(size_average=False)  # sum over elements instead of averaging

        learning_rate = 0.0005
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)

        self.cuda()
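size_average=False sums the per-element values instead of averaging them (reduction='sum' on current PyTorch). A quick numeric check of the elementwise rule, 0.5 * x**2 for |x| < 1 and |x| - 0.5 otherwise:

import torch
import torch.nn as nn

pred = torch.tensor([0.5, 2.0])
target = torch.zeros(2)
# Elementwise: 0.5 * 0.5**2 = 0.125 and 2.0 - 0.5 = 1.5, so the sum is 1.625
print(nn.SmoothL1Loss(reduction='sum')(pred, target))   # tensor(1.6250)
print(nn.SmoothL1Loss(reduction='mean')(pred, target))  # tensor(0.8125)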