Python chainer module: report() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use chainer.report().
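
Before the individual project extracts, here is a minimal, self-contained sketch of the pattern they all share: a Chain computes its loss, passes a dictionary of scalar values to chainer.report() with itself as the observer, and a Reporter (or a Trainer's LogReport extension) collects the reported values. The MyClassifier link and the random toy data are made up for illustration and do not come from any of the projects below.

import numpy as np

import chainer
import chainer.functions as F
import chainer.links as L


class MyClassifier(chainer.Chain):
    def __init__(self):
        super(MyClassifier, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(None, 10)

    def __call__(self, x, t):
        y = self.fc(x)
        loss = F.softmax_cross_entropy(y, t)
        # Report scalars against this link; inside a Trainer, extensions
        # such as LogReport pick these values up automatically.
        chainer.report({'loss': loss, 'accuracy': F.accuracy(y, t)}, self)
        return loss


model = MyClassifier()
x = np.random.rand(8, 784).astype(np.float32)
t = np.random.randint(0, 10, size=8).astype(np.int32)

# Outside a Trainer, report() only records values inside a Reporter scope.
reporter = chainer.Reporter()
reporter.add_observer('main', model)
observation = {}
with reporter.scope(observation):
    model(x, t)
print(observation)  # e.g. {'main/loss': variable(...), 'main/accuracy': variable(...)}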

Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs, ts):
        """
        Inputs:
            xs (Variable): window features of shape (batchsize, n);
                columns 0:7 are word ids, 7:14 suffix ids and 14: capitalization ids
            ts (Variable): gold labels of shape (batchsize,)
        """
        words, suffixes, caps = xs[:,:7], xs[:, 7:14], xs[:, 14:]
        h_w = self.emb_word(words)
        h_c = self.emb_caps(caps)
        h_s = self.emb_suffix(suffixes)
        h = F.concat([h_w, h_c, h_s], 2)
        batchsize, ntokens, hidden = h.data.shape
        h = F.reshape(h, (batchsize, ntokens * hidden))
        ys = self.linear(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)

        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, ws, ss, ps, ts):
        """
        ws, ss, ps: batched word, suffix and prefix ids
        ts: gold labels of shape (batchsize, length)
        """
        batchsize, length = ts.shape
        ys = self.forward(ws, ss, ps)[1:-1]
        ts = [F.squeeze(x, 0) for x in F.split_axis(F.transpose(ts), length, 0)]
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= length
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | Project source | File source
def _get_loss_gen(self):
        batchsize = self.y_fake.data.shape[0]
        L_mce = F.softmax_cross_entropy(self.pred_label_map, self.ground_truth, normalize=False)
        L_bce = F.softmax_cross_entropy(self.y_fake, Variable(self.xp.ones(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
        loss = L_mce + self.L_bce_weight * L_bce

        # log report
        label_true = chainer.cuda.to_cpu(self.ground_truth.data)
        label_pred = chainer.cuda.to_cpu(self.pred_label_map.data).argmax(axis=1)
        logs = []
        for i in six.moves.range(batchsize):
            acc, acc_cls, iu, fwavacc = utils.label_accuracy_score(
                label_true[i], label_pred[i], self.n_class)
            logs.append((acc, acc_cls, iu, fwavacc))
        log = np.array(logs).mean(axis=0)
        values = {
            'loss': loss,
            'accuracy': log[0],
            'accuracy_cls': log[1],
            'iu': log[2],
            'fwavacc': log[3],
        }
        chainer.report(values, self.gen)

        return loss
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | Project source | File source
def calc_loss(self):
        batchsize = self.ground_truth.shape[0]
        self.loss = F.softmax_cross_entropy(self.pred_label_map, self.ground_truth, normalize=False)

        # log report
        label_true = chainer.cuda.to_cpu(self.ground_truth.data)
        label_pred = chainer.cuda.to_cpu(self.pred_label_map.data).argmax(axis=1)
        logs = []
        for i in six.moves.range(batchsize):
            acc, acc_cls, iu, fwavacc = utils.label_accuracy_score(
                label_true[i], label_pred[i], self.n_class)
            logs.append((acc, acc_cls, iu, fwavacc))
        log = np.array(logs).mean(axis=0)
        values = {
            'loss': self.loss,
            'accuracy': log[0],
            'accuracy_cls': log[1],
            'iu': log[2],
            'fwavacc': log[3],
        }
        chainer.report(values, self.model)
Project: paint_transfer_c92    Author: Hiroshiba    | Project source | File source
def sum_loss(self, loss):
        sum_loss_enc = BaseLoss.blend_loss(loss['encoder'], self.config.blend['encoder'])
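        # the same key is reported against different observers, so the logged entries
        # stay separate (e.g. encoder/sum_loss vs. generator/sum_loss, depending on
        # how the observers are registered with the reporter)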
        chainer.report({'sum_loss': sum_loss_enc}, self.models.encoder)

        sum_loss_gen = BaseLoss.blend_loss(loss['generator'], self.config.blend['generator'])
        chainer.report({'sum_loss': sum_loss_gen}, self.models.generator)

        sum_loss_mismatch_discriminator = BaseLoss.blend_loss(
            loss['mismatch_discriminator'], self.config.blend['mismatch_discriminator'])
        chainer.report({'sum_loss': sum_loss_mismatch_discriminator}, self.models.mismatch_discriminator)

        return {
            'encoder': sum_loss_enc,
            'generator': sum_loss_gen,
            'mismatch_discriminator': sum_loss_mismatch_discriminator,
        }
Project: voxcelchain    Author: hiroaki-kaneda    | Project source | File source
def __call__(self, x, t):
        # Solve the classification problem with softmax cross-entropy loss.
        h = self.fwd(x)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainer-gan-experiments    Author: Aixile    | Project source | File source
def update_core(self):
        xp = self.gen.xp
        self._iter += 1

        opt_g = self.get_optimizer('gen')
        opt_d = self.get_optimizer('dis')

        data_z = self.get_latent_code_batch()
        data_x = self.get_real_image_batch()

        x_fake = self.gen(Variable(data_z))
        dis_fake = self.dis(x_fake)

        loss_gen = loss_func_lsgan_dis_real(dis_fake)
        chainer.report({'loss': loss_gen}, self.gen)

        opt_g.zero_grads()
        loss_gen.backward()
        opt_g.update()

        x_fake.unchain_backward()
        x_real = Variable(data_x)
        dis_real = self.dis(x_real)
        loss_dis = loss_func_lsgan_dis_real(dis_real) + loss_func_lsgan_dis_fake(dis_fake)

        opt_d.zero_grads()
        loss_dis.backward()
        opt_d.update()

        chainer.report({'loss': loss_dis}, self.dis)
Project: chainer-gan-experiments    Author: Aixile    | Project source | File source
def update_core(self):
        xp = self.gen.xp
        self._iter += 1

        opt_g = self.get_optimizer('gen')
        opt_d = self.get_optimizer('dis')

        data_z = self.get_latent_code_batch()
        data_x = self.get_real_image_batch()

        x_fake = self.gen(Variable(data_z))
        dis_fake = self.dis(x_fake)

        loss_gen = loss_func_dcgan_dis_real(dis_fake)
        chainer.report({'loss': loss_gen}, self.gen)

        opt_g.zero_grads()
        loss_gen.backward()
        opt_g.update()

        x_fake.unchain_backward()
        x_real = Variable(data_x)
        dis_real = self.dis(x_real)
        loss_dis = loss_func_dcgan_dis_real(dis_real) + loss_func_dcgan_dis_fake(dis_fake)

        opt_d.zero_grads()
        loss_dis.backward()
        opt_d.update()

        chainer.report({'loss': loss_dis}, self.dis)
Project: Comicolorization    Author: DwangoMediaVillage    | Project source | File source
def make_loss(self, input, concat, target, test):
        output = self.forwarder(input, concat, test)['image']
        mae_loss = chainer.functions.mean_absolute_error(output, target)

        loss = {
            'mae': mae_loss,
        }
        chainer.report(loss, self.model)

        return {
            'main': loss,
        }
Project: Comicolorization    Author: DwangoMediaVillage    | Project source | File source
def sum_loss(self, loss):
        sum_loss = self.blend_loss(loss, self.config.blend['main'])
        chainer.report({'sum_loss': sum_loss}, self.model)
        return sum_loss
Project: tensorboard-pytorch    Author: lanpa    | Project source | File source
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
Project: tensorboard-pytorch    Author: lanpa    | Project source | File source
def loss_gen(self, gen, y_fake):
        batchsize = len(y_fake)
        loss = F.sum(F.softplus(-y_fake)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w, s, p, cat, dep), ..., ]
        w: word, s: suffix, p: prefix, cat: category, dep: dependency head
        """
        batchsize = len(xs)
        ws, ss, ps, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, ss, ps)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w, c, l, cat, dep), ..., ]
        w: word, c: char, l: length, cat: category, dep: dependency head
        """
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls, dep_ts if self.train else None)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w, s, p, l, cat, dep), ..., ] or [(w, s, p, l, cat, dep, weight), ..., ]
        w: word, s: suffix, p: prefix, l: length, cat: category, dep: dependency head
        """
        batchsize = len(xs)

        if len(xs[0]) == 6:
            ws, ss, ps, ls, cat_ts, dep_ts = zip(*xs)
            xp = chainer.cuda.get_array_module(ws[0])
            weights = [xp.array(1, 'f') for _ in xs]
        else:
            ws, ss, ps, ls, cat_ts, dep_ts, weights = zip(*xs)

        cat_ys, dep_ys = self.forward(ws, ss, ps, ls, dep_ts if self.train else None)

        cat_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we  in zip(cat_ys, cat_ts, weights)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)]) / batchsize

        dep_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we in zip(dep_ys, dep_ts, weights)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)]) / batchsize

        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, ws, cs, cat_ts, dep_ts):
        batchsize, length = cat_ts.shape
        cat_ys, dep_ys = self.forward(ws, cs)
        cat_ys = cat_ys[1:-1]
        cat_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(cat_ts), length, 0)]
        assert len(cat_ys) == len(cat_ts)
        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])


        # hs [(length, hidden_dim), ...]
        dep_ys = [x[1:-1] for x in dep_ys]
        dep_ts = [F.reshape(x, (length,)) for x in F.split_axis(dep_ts, batchsize, 0)]

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= length
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w, s, p, cat, dep), ..., ]
        w: word, s: suffix, p: prefix, cat: category, dep: dependency head
        """
        batchsize = len(xs)
        ws, ss, ps, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, ss, ps)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, xs):
        """
        xs [(w, s, p, cat, dep), ..., ] or [(w, s, p, cat, dep, weight), ..., ]
        w: word, s: suffix, p: prefix, cat: category, dep: dependency head
        """
        batchsize = len(xs)

        if len(xs[0]) == 5:
            ws, ss, ps, cat_ts, dep_ts = zip(*xs)
            xp = chainer.cuda.get_array_module(ws[0])
            weights = [xp.array(1, 'f') for _ in xs]
        else:
            ws, ss, ps, cat_ts, dep_ts, weights = zip(*xs)

        cat_ys, dep_ys = self.forward(ws, ss, ps, dep_ts if self.train else None)

        cat_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we  in zip(cat_ys, cat_ts, weights)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) \
                for y, t in zip(cat_ys, cat_ts)]) / batchsize

        dep_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we in zip(dep_ys, dep_ts, weights)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) \
                    for y, t in zip(dep_ys, dep_ts)]) / batchsize

        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg    Author: masashi-y    | Project source | File source
def __call__(self, ws, ss, ps, cat_ts, dep_ts):
        """
        ws, ss, ps: batched word, suffix and prefix ids of shape (batchsize, length)
        cat_ts, dep_ts: gold category and dependency targets of shape (batchsize, length)
        """
        batchsize, length = ws.shape
        cat_ys, dep_ys = self.forward(ws, ss, ps)
        # drop the start/end sentinel positions before computing the per-step losses
        cat_ys, dep_ys = cat_ys[1:-1], dep_ys[1:-1]

        cat_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(cat_ts), length, 0)]

        dep_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(dep_ts), length, 0)]

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= length
        dep_acc /= length
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: DeepPoseComparison    Author: ynaka81    | Project source | File source
def __call__(self, image, x, v):
        y = self.predict(image)
        loss = mean_squared_error(y, x, v, use_visibility=self.use_visibility)
        chainer.report({'loss': loss}, self)
        return loss
Project: SketchSimplification    Author: La4La    | Project source | File source
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, D_u_rough, batchsize, alpha=0.1, beta=0.1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
        loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss_adv_unpaired = F.softmax_cross_entropy(D_u_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        #loss_line = self.line_loss(G_p_rough, p_line)
        loss = loss_L + alpha * loss_adv + beta * loss_adv_unpaired #+ loss_line
        chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv, 'loss_adv_u': loss_adv_unpaired}, gen)
        return loss
Project: SketchSimplification    Author: La4La    | Project source | File source
def loss_dis(self, dis, D_p_rough, p_line, D_u_rough, u_line, batchsize, alpha=0.1, beta=0.1):
        xp = self.gen.xp
        loss_fake_p = F.softmax_cross_entropy(D_p_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
        loss_real_p = F.softmax_cross_entropy(self.dis(p_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss_fake_u = F.softmax_cross_entropy(D_u_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
        loss_real_u = F.softmax_cross_entropy(self.dis(u_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss = alpha * (loss_fake_p + loss_real_p) + beta * (loss_fake_u + loss_real_u)
        chainer.report({'loss': loss, 'fake_p': loss_fake_p, 'real_p':loss_real_p, 'fake_u': loss_fake_u, 'real_u':loss_real_u}, dis)
        return loss
Project: SketchSimplification    Author: La4La    | Project source | File source
def loss_gen(self, gen, G_out, gt, batchsize, alpha=1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_out, gt) * G_out.data.size
        loss = loss_L
        chainer.report({'loss': loss, "loss_L": loss_L}, gen)
        return loss
Project: SketchSimplification    Author: La4La    | Project source | File source
def loss_gen(self, gen, G_p_rough, D_p_rough, p_line, batchsize, alpha=0.1):
        xp = self.gen.xp
        loss_L = F.mean_squared_error(G_p_rough, p_line) * G_p_rough.data.shape[0]
        loss_adv = F.softmax_cross_entropy(D_p_rough, Variable(xp.zeros(batchsize, dtype=np.int32)))
        #loss_line = self.line_loss(G_p_rough, p_line)
        loss = loss_L + alpha * loss_adv #+ loss_line
        chainer.report({'loss': loss, "loss_L": loss_L, 'loss_adv': loss_adv}, gen)
        return loss
Project: SketchSimplification    Author: La4La    | Project source | File source
def loss_dis(self, dis, D_p_rough, p_line, batchsize, alpha=0.1):
        xp = self.gen.xp
        loss_fake_p = F.softmax_cross_entropy(D_p_rough, Variable(xp.ones(batchsize, dtype=np.int32)))
        loss_real_p = F.softmax_cross_entropy(self.dis(p_line), Variable(xp.zeros(batchsize, dtype=np.int32)))
        loss = alpha * (loss_fake_p + loss_real_p)
        chainer.report({'loss': loss, 'fake_p': loss_fake_p, 'real_p':loss_real_p}, dis)
        return loss
Project: chainer-VAE    Author: crcrpar    | Project source | File source
def __call__(self, x, sigmoid=True):
        """AutoEncoder"""
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # reconstruction loss
        rec_loss = 0
        for l in six.moves.range(self.k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                / (self.k * batchsize)
        loss = rec_loss + \
            self.C * gaussian_kl_divergence(mu, ln_var) / batchsize
        chainer.report({'loss': loss}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def __call__(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h, train=self.train))
        h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainermn    Author: chainer    | Project source | File source
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
Project: chainer-pix2pix    Author: wuhuikai    | Project source | File source
def loss_D(self, real_D, fake_D):
        batch_size, _, h, w = real_D.shape

        loss = - F.sum(F.log(real_D + self.eps) + F.log(1 - fake_D + self.eps)) / (batch_size*h*w)
        chainer.report({'loss': loss}, self.D)

        return loss
Project: chainer-pix2pix    Author: wuhuikai    | Project source | File source
def loss_G(self, real_B, fake_B, fake_D):
        loss_l1 = F.mean_absolute_error(real_B, fake_B)
        chainer.report({'loss_l1': loss_l1}, self.G)

        batch_size, _, h, w = fake_D.shape
        loss_D = - F.sum(F.log(fake_D + self.eps)) / (batch_size*h*w)
        chainer.report({'loss_D': loss_D}, self.G)

        loss = loss_D + self.lambd*loss_l1
        chainer.report({'loss': loss}, self.G)

        return loss
Project: chainer-pix2pix    Author: pfnet-research    | Project source | File source
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize,_,w,h = y_out.data.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, enc)
        return loss
Project: chainer-pix2pix    Author: pfnet-research    | Project source | File source
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize,_,w,h = y_out.data.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, dec)
        return loss
Project: chainer-pix2pix    Author: pfnet-research    | Project source | File source
def loss_dis(self, dis, y_in, y_out):
        batchsize,_,w,h = y_in.data.shape

        L1 = F.sum(F.softplus(-y_in)) / batchsize / w / h
        L2 = F.sum(F.softplus(y_out)) / batchsize / w / h
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
Project: Semantic-Segmentation-using-Adversarial-Networks    Author: oyam    | Project source | File source
def _get_loss_dis(self):
        batchsize = self.y_fake.data.shape[0]
        loss = F.softmax_cross_entropy(self.y_real, Variable(self.xp.ones(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
        loss += F.softmax_cross_entropy(self.y_fake, Variable(self.xp.zeros(batchsize, dtype=self.xp.int32), volatile=not self.gen.train))
        chainer.report({'loss': loss}, self.dis)
        return loss
Project: paint_transfer_c92    Author: Hiroshiba    | Project source | File source
def make_loss(self, target, raw_line, test):
        xp = self.models.mismatch_discriminator.xp
        batchsize = target.shape[0]
        l_true = xp.ones(batchsize, dtype=numpy.float32)
        l_false = xp.zeros(batchsize, dtype=numpy.float32)

        raw_line_mismatch = chainer.functions.permutate(
            raw_line, indices=numpy.roll(numpy.arange(batchsize, dtype=numpy.int32), shift=1), axis=0)

        output = self.forwarder.forward(
            input=target,
            raw_line=raw_line,
            raw_line_mismatch=raw_line_mismatch,
            test=test,
        )
        generated = output['generated']
        match = output['match']
        mismatch = output['mismatch']
        z = output['z']

        mse = chainer.functions.mean_squared_error(generated, target)
        loss_gen = {'mse': mse}
        chainer.report(loss_gen, self.models.generator)

        match_lsm = utility.chainer.least_square_mean(match, l_false)
        mismatch_lsm = utility.chainer.least_square_mean(mismatch, l_true)
        loss_mismatch_discriminator = {'match_lsm': match_lsm, 'mismatch_lsm': mismatch_lsm}
        chainer.report(loss_mismatch_discriminator, self.models.mismatch_discriminator)

        fake_mismatch_lsm = utility.chainer.least_square_mean(match, l_true)
        z_l2 = chainer.functions.sum(z ** 2) / z.size
        loss_enc = {'mse': mse, 'fake_mismatch_lsm': fake_mismatch_lsm, 'activity_regularization': z_l2}
        chainer.report(loss_enc, self.models.encoder)

        return {
            'encoder': loss_enc,
            'generator': loss_gen,
            'mismatch_discriminator': loss_mismatch_discriminator,
        }
Project: chainer-DPNs    Author: oyam    | Project source | File source
def __call__(self, x, t):
        features = F.concat(self.features(x), axis=1)
        out = F.average_pooling_2d(features, ksize=7)
        out = self.classifier(out)

        loss = F.softmax_cross_entropy(out, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(out, t)}, self)
        return loss
Project: chainer-DPNs    Author: oyam    | Project source | File source
def __call__(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: chainer-DPNs    Author: oyam    | Project source | File source
def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: wavenet    Author: rampage644    | Project source | File source
def __call__(self, x, t, label):
         y = self.predictor(x, label)
         dims = self.xp.prod(np.array(y.shape[2:]))  # for CIFAR should be 3072

         nll = F.softmax_cross_entropy(y, t, normalize=True)
         chainer.report({'nll': nll, 'bits/dim': nll / dims}, self)
         return nll
Project: GAN    Author: lyakaap    | Project source | File source
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        L1 = 0.5 * (F.sum((y_real - self.b) ** 2)) / batchsize
        L2 = 0.5 * (F.sum((y_fake - self.a) ** 2)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
Project: GAN    Author: lyakaap    | Project source | File source
def loss_gen(self, gen, y_fake):
        batchsize = len(y_fake)
        loss = 0.5 * (F.sum((y_fake - self.c) ** 2)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss
Project: GAN    Author: lyakaap    | Project source | File source
def loss_dis(self, dis, y_fake, y_real):
        batchsize = len(y_fake)
        L1 = 0.5 * (F.sum((y_real - self.b) ** 2)) / batchsize
        L2 = 0.5 * (F.sum((y_fake - self.a) ** 2)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
Project: GAN    Author: lyakaap    | Project source | File source
def loss_gen(self, gen, y_fake):
        batchsize = len(y_fake)
        loss = 0.5 * (F.sum((y_fake - self.c) ** 2)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss