Python chainer.functions module: accuracy() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use chainer.functions.accuracy().
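F.accuracy(y, t) takes a batch of logits of shape (batchsize, n_classes) and integer labels of shape (batchsize,), and returns a scalar chainer.Variable holding the fraction of rows whose argmax matches the label; an optional ignore_label excludes entries from the count. A minimal sketch, with toy values assumed:

import numpy as np
import chainer.functions as F

logits = np.array([[2.0, 0.1, 0.3],
                   [0.2, 1.5, 0.1]], dtype=np.float32)  # (batchsize, n_classes)
labels = np.array([0, 2], dtype=np.int32)               # ground-truth class indices
acc = F.accuracy(logits, labels)
print(acc.data)  # 0.5: row 0 is classified correctly, row 1 is not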

Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
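The three successive F.sum calls followed by the division average the logits over the last three (group and spatial) axes. A newer Chainer whose F.sum accepts a tuple of axes can express the same reduction in one line (a sketch, assuming the same 8*8*8 trailing shape):

h = F.sum(h, axis=(-3, -2, -1)) / (8 * 8 * 8)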
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8 * 4

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        # First conv layer
        h = self[0](x)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        n, nc, ns, nx, ny = h.data.shape
        h = F.reshape(h, (n, nc * ns, nx, ny))
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = x
        h = F.dropout(h, ratio=0.2, train=train)
        h = self.l1(h, train, finetune)
        h = self.l2(h, train, finetune)
        h = self.l3(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l4(h, train, finetune)
        h = self.l5(h, train, finetune)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, ratio=0.5, train=train)
        h = self.l7(h, train, finetune)
        h = self.l8(h, train, finetune)
        h = self.l9(h, train, finetune)

        h = F.sum(h, axis=-1)
        h = F.sum(h, axis=-1)
        h /= 8 * 8

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = x

        # First conv layer
        h = self[0](h)

        # Residual blocks
        for i in range(1, len(self) - 2):
            h = self[i](h, train, finetune)

        # BN, relu, pool, final layer
        h = self[-2](h)
        h = F.relu(h)
        h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
        h = self[-1](h)
        h = F.reshape(h, h.data.shape[:2])

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l2(h, train, finetune)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0, cover_all=True, use_cudnn=True)

        h = self.l3(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.dropout(h, self.dr, train)

        h = self.top(h)

        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: biaffineparser | Author: chantera
def compute_accuracy(self, y, t):
        arc_logits, label_logits = y
        true_arcs, true_labels = t.T

        b, l1, l2 = arc_logits.shape
        true_arcs = F.pad_sequence(true_arcs, padding=-1)
        if not self.model._cpu:
            true_arcs.to_gpu()
        arc_accuracy = F.accuracy(
            F.reshape(arc_logits, (b * l1, l2)),
            F.reshape(true_arcs, (b * l1,)),
            ignore_label=-1)

        b, l1, d = label_logits.shape
        true_labels = F.pad_sequence(true_labels, padding=-1)
        if not self.model._cpu:
            true_labels.to_gpu()
        label_accuracy = F.accuracy(
            F.reshape(label_logits, (b * l1, d)),
            F.reshape(true_labels, (b * l1,)),
            ignore_label=-1)

        accuracy = (arc_accuracy + label_accuracy) / 2
        return accuracy
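The pad-then-ignore pattern above (F.pad_sequence with padding=-1 followed by F.accuracy(..., ignore_label=-1)) lets variable-length targets share one flat accuracy call: padded positions simply drop out of the average. A minimal sketch, with toy values assumed:

import numpy as np
import chainer.functions as F

logits = np.array([[2.0, 1.0],
                   [0.2, 0.9],
                   [3.0, 0.0]], dtype=np.float32)
labels = np.array([0, 1, -1], dtype=np.int32)  # -1 marks a padded position
acc = F.accuracy(logits, labels, ignore_label=-1)
print(acc.data)  # 1.0: only the first two rows are counted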
Project: depccg | Author: masashi-y
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg | Author: masashi-y
def __call__(self, xs):
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg | Author: masashi-y
def __call__(self, xs):
        """
        xs [(w,s,p,y), ..., ]
        w: word, s: suffix, p: prefix, y: label
        """
        batchsize = len(xs)
        ws, ss, ps, ts = zip(*xs)
        ys = self.forward(ws, ss, ps)
        loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(ys, ts)])

        acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(ys, ts)])

        acc /= batchsize
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg | Author: masashi-y
def __call__(self, xs, ts):
        """
        Inputs:
            xs (Variable): token features; word ids in columns 0:7,
                suffix ids in columns 7:14, caps ids in columns 14:
            ts (Variable): labels of shape (batchsize,)
        """
        words, suffixes, caps = xs[:, :7], xs[:, 7:14], xs[:, 14:]
        h_w = self.emb_word(words)
        h_c = self.emb_caps(caps)
        h_s = self.emb_suffix(suffixes)
        h = F.concat([h_w, h_c, h_s], 2)
        batchsize, ntokens, hidden = h.data.shape
        h = F.reshape(h, (batchsize, ntokens * hidden))
        ys = self.linear(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)

        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: depccg | Author: masashi-y
def __call__(self, ws, cs, ls, ts):
        h_w = self.emb_word(ws) # (batchsize, windowsize, word_dim)
        h_c = self.emb_char(cs) # (batchsize, windowsize, max_char_len, char_dim)
        batchsize, windowsize, _, _ = h_c.data.shape
        # (batchsize, windowsize, char_dim)
        h_c = F.sum(h_c, 2)
        h_c, ls = F.broadcast(h_c, F.reshape(ls, (batchsize, windowsize, 1)))
        h_c = h_c / ls
        h = F.concat([h_w, h_c], 2)
        h = F.reshape(h, (batchsize, -1))
        # ys = self.linear1(h)
        h = F.relu(self.linear1(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        ys = self.linear2(h)

        loss = F.softmax_cross_entropy(ys, ts)
        acc = F.accuracy(ys, ts)
        chainer.report({
            "loss": loss,
            "accuracy": acc
            }, self)
        return loss
Project: MultimodalDL | Author: masataka46
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-qrnn | Author: musyoku
def compute_accuracy(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        acc = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in range(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute accuracy
        for batch_index, batch in enumerate(sections):
            printr("computing accuracy ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            acc.append(compute_accuracy_batch(model, batch))

        result.append(sum(acc) / len(acc))
        printr("")

    return result
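The section arithmetic above places split points at multiples of batchsize and relies on np.split to leave a shorter final section when the bucket size is not evenly divisible. A worked example, with toy sizes assumed:

import numpy as np

dataset = np.arange(10)
batchsize = 4
num_sections = len(dataset) // batchsize - 1      # 1
if len(dataset) % batchsize > 0:
    num_sections += 1                             # 2
indices = [(i + 1) * batchsize for i in range(num_sections)]  # [4, 8]
sections = np.split(dataset, indices, axis=0)     # lengths 4, 4, 2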
Project: chainer-qrnn | Author: musyoku
def compute_perplexity(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        ppl = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in range(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute perplexity
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing perplexity ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            ppl.append(compute_perplexity_batch(model, batch))

        result.append(sum(ppl) / len(ppl))

        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()
    return result
Project: chainer-deconv | Author: germanRos
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-deconv | Author: germanRos
def __call__(self, x, t):
        self.clear()
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: ImageCaptioning | Author: rkuga
def step(self, perm, batch_index, mode, epoch):
        if mode == 'train':
            data, label = self.read_batch(perm, batch_index, self.train_data)
        else:
            data, label = self.read_batch(perm, batch_index, self.test_data)

        data = Variable(cuda.to_gpu(data))
        yl = self.network(data)

        label = Variable(cuda.to_gpu(label))

        L_network = F.softmax_cross_entropy(yl, label)
        A_network = F.accuracy(yl, label)

        if mode == 'train':
            self.o_network.zero_grads()
            L_network.backward()
            self.o_network.update()

        return {"prediction": yl.data.get(),
                "current_loss": L_network.data.get(),
                "current_accuracy": A_network.data.get(),
                }
Project: chainer-glu | Author: musyoku
def compute_accuracy(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        acc = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in xrange(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute accuracy
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing accuracy ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            acc.append(compute_accuracy_batch(model, batch))

        result.append(sum(acc) / len(acc))
        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()

    return result
Project: chainer-glu | Author: musyoku
def compute_perplexity(model, buckets, batchsize=100):
    result = []
    for bucket_index, dataset in enumerate(buckets):
        ppl = []
        # split into minibatch
        if len(dataset) > batchsize:
            num_sections = len(dataset) // batchsize - 1
            if len(dataset) % batchsize > 0:
                num_sections += 1
            indices = [(i + 1) * batchsize for i in xrange(num_sections)]
            sections = np.split(dataset, indices, axis=0)
        else:
            sections = [dataset]
        # compute perplexity
        for batch_index, batch in enumerate(sections):
            sys.stdout.write("\rcomputing perplexity ... bucket {}/{} (batch {}/{})".format(bucket_index + 1, len(buckets), batch_index + 1, len(sections)))
            sys.stdout.flush()
            ppl.append(compute_perplexity_batch(model, batch))

        result.append(sum(ppl) / len(ppl))
        sys.stdout.write("\r" + stdout.CLEAR)
        sys.stdout.flush()
    return result
Project: chainer-EWC | Author: okdshin
def __call__(self, *args):
        x = args[:-1]
        t = args[-1]
        self.y = None
        self.loss = None
        self.accuracy = None
        self.y = self.predictor(*x)
        self.loss = F.softmax_cross_entropy(self.y, t)

        if self.stored_variable_list is not None and \
                self.fisher_list is not None:  # i.e. Stored
            for i in range(len(self.variable_list)):
                self.loss += self.lam/2. * F.sum(
                        self.fisher_list[i] *
                        F.square(self.variable_list[i][1] -
                                 self.stored_variable_list[i]))
        reporter.report({'loss': self.loss}, self)
        if self.compute_accuracy:
            self.accuracy = F.accuracy(self.y, t)
            reporter.report({'accuracy': self.accuracy}, self)
        return self.loss
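The Fisher-weighted quadratic term added to the loss above is the elastic weight consolidation (EWC) penalty: each parameter is pulled back toward the value stored after the previous task, in proportion to its estimated importance,

    loss = task_loss + Σ_i (λ / 2) · F_i · (θ_i − θ*_i)²

where F_i is the diagonal Fisher information (fisher_list), θ*_i the stored parameter (stored_variable_list), and the sum runs over all parameter elements.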
Project: chainer-caption | Author: apple2373
def __call__(self, x, t):
        self.clear()
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.average_pooling_2d(h, 7, stride=1)
        if t=="feature":
            return h
        h = self.fc(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t)
            self.accuracy = F.accuracy(h, t)
            return self.loss
        else:
            return h
Project: cifar-10 | Author: shiba24
def __call__(self, x, t, predict=False):
        h = self.bn1(self.conv1(x), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = self.bn2(self.conv2(h), test=not self.train)
        h = F.max_pooling_2d(F.relu(h), 2, stride=2)
        h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
        h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
        h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
        h = self.fc7(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        if predict:
            return h
        else:
            return self.loss
Project: voxcelchain | Author: hiroaki-kaneda
def __call__(self, x, t):
        # To solve the classification problem with "softmax", use "softmax_cross_entropy".
        h = self.fwd(x)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Project: nelder_mead | Author: owruby
def __call__(self, x, t):
        h = F.relu(self.l1(x))
        h = self.l2(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)

        return self.loss
Project: chainer-object-detection | Author: dsanno
def evaluate(model, dataset, crop_margin, test_size):
    xp = model.xp
    iterator = chainer.iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)
    acc_sum = 0
    iteration = 0
    for batch in iterator:
        image_batch = []
        label_batch = []
        for image_path, category_id, _ in batch:
            image = load_image(image_path)
            image_width, image_height = image.size
            crop_size = min(image_width, image_height) - crop_margin
            crop_rect = ((image_width - crop_size) // 2, (image_height - crop_size) // 2, crop_size, crop_size)
#            input_size = test_size
            input_size = int(round(crop_size / 32.0) * 32)
            if input_size < 64:
                input_size = 64
            elif input_size > test_size:
                input_size = test_size
            image_batch.append(transform_image(image, crop_rect, input_size))
            label_batch.append(category_id)

        x = xp.asarray(image_batch)
        t = xp.asarray(label_batch)

        with chainer.using_config('enable_backprop', False):
            with chainer.using_config('train', False):
                y = model(x)
        acc = F.accuracy(y, t)
        acc_sum += float(acc.data)
    return acc_sum / len(dataset)
Project: chainer-object-detection | Author: dsanno
def evaluate(model, dataset, crop_margin, test_size, batch_size):
    xp = model.xp
    iterator = chainer.iterators.SerialIterator(dataset, batch_size, repeat=False, shuffle=False)
    acc_sum = 0
    iteration = 0
    for batch in iterator:
        image_batch = []
        label_batch = []
        for image_path, category_id, _ in batch:
            image = load_image(image_path)
            image_width, image_height = image.size
            crop_size = min(image_width, image_height) - crop_margin
            crop_rect = ((image_width - crop_size) // 2, (image_height - crop_size) // 2, crop_size, crop_size)
            input_size = test_size
            image_batch.append(transform_image(image, crop_rect, input_size))
            label_batch.append(category_id)

        x = xp.asarray(image_batch)
        t = xp.asarray(label_batch)

        with chainer.using_config('enable_backprop', False):
            with chainer.using_config('train', False):
                y = model(x)
        acc = F.accuracy(y, t)
        acc_sum += float(acc.data) * len(batch)  # the final batch may be smaller than batch_size
    return acc_sum / len(dataset)
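Note that these two evaluate() helpers use Chainer v2's configuration mechanism: chainer.using_config('train', False) disables training-time behavior such as dropout, and ('enable_backprop', False) skips building the computational graph, replacing the explicit train= flags seen in the older snippets on this page.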
Project: fcn | Author: wkentaro
def __call__(self, x, t=None):
        h = x
        h = F.relu(self.conv1_1(h))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), ratio=.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=.5)
        h = self.fc8(h)
        fc8 = h

        self.score = fc8

        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(fc8, t)
        self.accuracy = F.accuracy(self.score, t)
        return self.loss
Project: gconv_experiments | Author: tscohen
def __call__(self, x, t, train=True, finetune=False):

        h = self.l1(x, train, finetune)
        # h = F.dropout(h, self.dr, train)
        h = F.max(h, axis=-3, keepdims=False)

        h = self.l2(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)

        h = self.l3(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        # h = F.dropout(h, self.dr, train)
        h = self.l4(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)
        # h = F.dropout(h, self.dr, train)
        h = self.l5(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)
        # h = F.dropout(h, self.dr, train)
        h = self.l6(h, train, finetune)
        h = F.max(h, axis=-3, keepdims=False)

        h = self.top(h)

        h = F.max(h, axis=-3, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)
        h = F.max(h, axis=-1, keepdims=False)

        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Project: depccg | Author: masashi-y
def __call__(self, xs):
        """
        xs [(w, s, p, cat, dep), ..., ]
        w: word, s: suffix, p: prefix, cat: category label, dep: dependency label
        """
        batchsize = len(xs)
        ws, ss, ps, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, ss, ps)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg | Author: masashi-y
def __call__(self, xs):
        """
        xs [(w, c, l, cat, dep), ..., ]
        w: word, c: char, l: length, cat: category label, dep: dependency label
        """
        batchsize = len(xs)
        ws, cs, ls, cat_ts, dep_ts = zip(*xs)
        cat_ys, dep_ys = self.forward(ws, cs, ls, dep_ts if self.train else None)

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])


        cat_acc /= batchsize
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg | Author: masashi-y
def __call__(self, ws, cs, cat_ts, dep_ts):
        batchsize, length = cat_ts.shape
        cat_ys, dep_ys = self.forward(ws, cs)
        cat_ys = cat_ys[1:-1]
        cat_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(cat_ts), length, 0)]
        assert len(cat_ys) == len(cat_ts)
        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])


        # hs [(length, hidden_dim), ...]
        dep_ys = [x[1:-1] for x in dep_ys]
        dep_ts = [F.reshape(x, (length,)) for x in F.split_axis(dep_ts, batchsize, 0)]

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= length
        dep_acc /= batchsize
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg | Author: masashi-y
def __call__(self, xs):
        """
        xs [(w, s, p, cat, dep), ..., ] or [(w, s, p, cat, dep, weight), ..., ]
        w: word, s: suffix, p: prefix, cat: category label, dep: dependency label
        """
        batchsize = len(xs)

        if len(xs[0]) == 5:
            ws, ss, ps, cat_ts, dep_ts = zip(*xs)
            xp = chainer.cuda.get_array_module(ws[0])
            weights = [xp.array(1, 'f') for _ in xs]
        else:
            ws, ss, ps, cat_ts, dep_ts, weights = zip(*xs)

        cat_ys, dep_ys = self.forward(ws, ss, ps, dep_ts if self.train else None)

        cat_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we  in zip(cat_ys, cat_ts, weights)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) \
                for y, t in zip(cat_ys, cat_ts)]) / batchsize

        dep_loss = reduce(lambda x, y: x + y,
            [we * F.softmax_cross_entropy(y, t) \
                    for y, t, we in zip(dep_ys, dep_ts, weights)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) \
                    for y, t in zip(dep_ys, dep_ts)]) / batchsize

        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: depccg | Author: masashi-y
def train(args):
    model = EmbeddingTagger(args.model, 50, 20, 30)
    model.setup_training(args.embed)
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)

    train = CCGBankDataset(args.model, args.train)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val = CCGBankDataset(args.model, args.val)
    val_iter = chainer.iterators.SerialIterator(
            val, args.batchsize, repeat=False, shuffle=False)
    optimizer = chainer.optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.model)

    val_interval = 5000, 'iteration'
    log_interval = 200, 'iteration'
    val_model = model.copy()

    trainer.extend(extensions.Evaluator(val_iter, val_model), trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy',
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
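The 'main/loss' and 'main/accuracy' columns printed here are populated by the chainer.report({...}, self) calls inside the model's __call__, as in the tagger snippets above; the Evaluator extension re-runs the same model on val_iter and reports its copies under the 'validation/' prefix.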
Project: depccg | Author: masashi-y
def __call__(self, ws, ss, ps, cat_ts, dep_ts):
        """
        ws, ss, ps: word, suffix and prefix ids of shape (batchsize, length)
        cat_ts, dep_ts: category and dependency labels
        """
        batchsize, length = ws.shape
        cat_ys, dep_ys = self.forward(ws, ss, ps)
        cat_ys = cat_ys[1:-1]  # drop the sentinel positions at both ends

        cat_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(cat_ts), length, 0)]

        dep_ts = [F.reshape(x, (batchsize,)) for x \
                in F.split_axis(F.transpose(dep_ts), length, 0)]

        cat_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(cat_ys, cat_ts)])
        cat_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(cat_ys, cat_ts)])

        dep_loss = reduce(lambda x, y: x + y,
            [F.softmax_cross_entropy(y, t) for y, t in zip(dep_ys, dep_ts)])
        dep_acc = reduce(lambda x, y: x + y,
            [F.accuracy(y, t, ignore_label=IGNORE) for y, t in zip(dep_ys, dep_ts)])

        cat_acc /= length
        dep_acc /= length
        chainer.report({
            "tagging_loss": cat_loss,
            "tagging_accuracy": cat_acc,
            "parsing_loss": dep_loss,
            "parsing_accuracy": dep_acc
            }, self)
        return cat_loss + dep_loss
Project: GroupingNN | Author: tokkuman
def __call__(self, x, t, train=True):
        y = self.predictor(x, train)
        self.loss = F.softmax_cross_entropy(y, t)
        self.acc = F.accuracy(y, t)
        return self.loss
Project: MultimodalDL | Author: masataka46
def clear(self):
        self.loss = None
        self.accuracy = None
Project: MultimodalDL | Author: masataka46
def __call__(self, x, y, t):
        self.clear()
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR1(x))), 3, stride=2)
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR2(hR))), 3, stride=2)
        hR = F.relu(self.convR3(hR))
        hR = F.relu(self.convR4(hR))
        hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
        hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
        hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD1(y))), 3, stride=2)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD2(hD))), 3, stride=2)
        hD = F.relu(self.convD3(hD))
        hD = F.relu(self.convD4(hD))
        hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
        hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
        hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)
        h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
        h = self.fc9(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: MultimodalDL | Author: masataka46
def clear(self):
        self.loss = None
        self.accuracy = None
Project: chainer-qrnn | Author: musyoku
def compute_accuracy_batch(model, batch):
    source, target = make_source_target_pair(batch)
    if model.xp is cuda.cupy:
        source = cuda.to_gpu(source)
        target = cuda.to_gpu(target)
    model.reset_state()
    Y = model(source)
    return float(F.accuracy(Y, target, ignore_label=ID_PAD).data)
Project: chainer-deconv | Author: germanRos
def clear(self):
        self.loss = None
        self.accuracy = None
Project: chainer-deconv | Author: germanRos
def clear(self):
        self.loss = None
        self.loss1 = None
        self.loss2 = None
        self.loss3 = None
        self.accuracy = None
Project: chainer-deconv | Author: germanRos
def clear(self):
        self.loss = None
        self.accuracy = None
Project: chainer-deconv | Author: germanRos
def clear(self):
        self.loss = None
        self.loss1 = None
        self.loss2 = None
        self.loss3 = None
        self.accuracy = None
Project: chainer-deconv | Author: germanRos
def __call__(self, x, t):
        self.clear()
        test = not self.train

        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x), test=test)),  3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a), test=test))
        a = F.relu(self.norma2(self.lina(a), test=test))
        a = self.outa(a)
        self.loss1 = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b), test=test))
        b = F.relu(self.normb2(self.linb(b), test=test))
        b = self.outb(b)
        self.loss2 = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        self.loss3 = F.softmax_cross_entropy(h, t)

        self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        self.accuracy = F.accuracy(h, t)
        return self.loss
Project: chainer-deconv | Author: germanRos
def clear(self):
        self.loss = None
        self.accuracy = None
Project: chainer-deconv | Author: germanRos
def forward(x, t):
        y, = func(inputs={'data': x}, outputs=['fc8'], train=False)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Project: chainer-deconv | Author: germanRos
def _train_linear_classifier(self, model, optimizer, gpu):
        def _make_label(x):
            a = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
            t = np.empty_like(a).astype(np.int32)
            t[a >= 0] = 0
            t[a < 0] = 1
            return t

        def _make_dataset(batch_size, unit_num, gpu):
            x_data = np.random.uniform(
                -1, 1, (batch_size, unit_num)).astype(np.float32)
            t_data = _make_label(x_data)
            if gpu:
                x_data = cuda.to_gpu(x_data)
                t_data = cuda.to_gpu(t_data)
            x = chainer.Variable(x_data)
            t = chainer.Variable(t_data)
            return x, t

        for _ in six.moves.range(self.EPOCH):
            x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
            model.zerograds()
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            loss.backward()
            optimizer.update()

        x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
        y_test = model(x_test)
        return F.accuracy(y_test, t_test)
Project: jrm_ssl | Author: Fhrozen
def clear(self):
        self.loss = None
        self.accuracy = None
Project: jrm_ssl | Author: Fhrozen
def __call__(self, variables):
        self.clear()
        y = self.encode(variables[0])
        self.loss = F.softmax_cross_entropy(y, variables[1])
        self.accuracy = F.accuracy(y, variables[1])
        return self.loss