Python chainer.links module: Classifier() example source code

The following 20 code examples, extracted from open-source Python projects, illustrate how to use chainer.links.Classifier().
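Before the project examples, here is a minimal, self-contained sketch of the common pattern. The two-layer `MLP` predictor, the layer sizes, and the dummy data below are illustrative assumptions, not taken from any of the projects. `L.Classifier` wraps a predictor link; by default it computes softmax cross-entropy as the loss and also reports accuracy:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    """A hypothetical two-layer perceptron, for illustration only."""
    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)  # input size is inferred on the first call
            self.l2 = L.Linear(n_units, n_out)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))

# L.Classifier wraps the predictor; lossfun defaults to F.softmax_cross_entropy
model = L.Classifier(MLP(100, 10))

x = np.random.randn(8, 784).astype(np.float32)         # dummy batch of 8 samples
t = np.random.randint(0, 10, size=8).astype(np.int32)  # dummy integer labels
loss = model(x, t)  # __call__ returns the scalar loss; accuracy is reported as a side effect
loss.backward()

A different loss function can be supplied through the `lossfun` keyword argument, as the fontkaruta_classifier example below does.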

Project: char-classify    Author: ekatek    | Project source | File source
def __init__(self, data, target, hidden_layers, model_filename=""):
        """Must be given either a net configuration or a model file to load from."""
        if hidden_layers == [] and model_filename == "":
            raise Exception("Must provide a net configuration or a file to load from")

        """ Divide the data into training and test """
        self.trainsize = int(len(data) * 5 / 6)
        self.testsize = len(data) - self.trainsize
        self.x_train, self.x_test = np.split(data, [self.trainsize])
        self.y_train, self.y_test = np.split(target, [self.trainsize])

        """ Create the underlying neural network model """
        self.sizes = [len(data[0])]
        self.sizes.extend(hidden_layers)
        self.sizes.append(len(set(target)))
        self.model = L.Classifier(BaseNetwork(self.sizes))

        """ Create the underlying optimizer """
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Project: chainermn    Author: chainer    | Project source | File source
def check_crossing_model(gpu):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(np.float32)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    if communicator.rank == 0:
        model = L.Classifier(Cross0(
            d, communicator, rank_next, rank_prev))
    else:
        model = L.Classifier(Cross1(
            d, communicator, rank_next, rank_prev))

    if gpu:
        model.to_gpu()
        X = chainer.cuda.to_gpu(X)
        Y = chainer.cuda.to_gpu(Y)

    for i in range(n):
        err = model(X[i:i + 1], Y[i:i + 1])
        err.backward()
Project: chainermn    Author: chainer    | Project source | File source
def check_twisting_model(gpu):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(np.float32)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    if communicator.rank == 0:
        model = L.Classifier(
            TwistFirst(d, communicator, rank_next))
    elif communicator.rank == communicator.size - 1:
        model = L.Classifier(
            TwistLast(d, communicator, rank_prev))
    else:
        model = L.Classifier(Twist(
            d, communicator, rank_prev, rank_next))

    if gpu:
        model.to_gpu()
        X = chainer.cuda.to_gpu(X)
        Y = chainer.cuda.to_gpu(Y)

    for i in range(n):
        err = model(X[i:i + 1], Y[i:i + 1])
        err.backward()
Project: chainer-examples    Author: nocotan    | Project source | File source
def main():
    model = L.Classifier(CNN())

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, batch_size=100)
    test_iter = chainer.iterators.SerialIterator(test, batch_size=100, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (5, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Project: Emotion_Voice_Recognition_Chainer-    Author: SnowMasaya    | Project source | File source
def __init__(self, x_data, y_data, iteration_number, feature, gpu=-1):
        self.N = 5000
        self.N_test = 766
        self.total = self.N + self.N_test
        self.emotion_weight = {0: self.total / 716, 1: self.total / 325,
                               2: self.total / 1383, 3: self.total / 743,
                               4: self.total / 2066, 5: self.total / 74,
                               6: self.total / 17, 7: self.total / 35,
                               8: self.total / 404, 9: self.total / 3}
        self.label_precision = {label: 0 for label in range(10)}
        self.label_counter = {label: 0 for label in range(10)}
        self.label_data = list(range(10))
        self.x_data = x_data.astype(np.float32)
        self.y_data = y_data.astype(np.int32)
        self.y_predict_data = [] 
        scaler = preprocessing.StandardScaler()
        self.x_data = scaler.fit_transform(self.x_data)
        self.iteration_number = iteration_number
        if feature == "IS2009":
            self.input_layer = 384
        elif feature == "IS2010":
            self.input_layer = 1582 
        self.n_units = 256
        self.output_layer = 10 
        self.batchsize = 25 
        self.model = L.Classifier(net.EmotionRecognitionVoice(self.input_layer, self.n_units, self.output_layer))
        self.gpu = gpu
        self.__set_cpu_or_gpu()
Project: Emotion_Voice_Recognition_Chainer-    Author: SnowMasaya    | Project source | File source
def __init__(self, x_data, y_data, feature, initmodel, gpu=-1):
        self.N = 5000
        self.N_test = 766
        self.total = self.N + self.N_test
        self.emotion_weight = {0: self.total / 716, 1: self.total / 325,
                               2: self.total / 1383, 3: self.total / 743,
                               4: self.total / 2066, 5: self.total / 74,
                               6: self.total / 17, 7: self.total / 35,
                               8: self.total / 404, 9: self.total / 3}
        self.x_data = x_data.astype(np.float32)
        self.x_data = np.vstack((self.x_data, self.x_data))
        self.y_data = y_data.astype(np.int32)
        self.y_data = np.vstack((self.y_data, self.y_data))
        if feature == "IS2009":
            self.input_layer = 384
        elif feature == "IS2010":
            self.input_layer = 1582
        self.n_units = 256
        self.output_layer = 10
        self.model = L.Classifier(net.EmotionRecognitionVoice(self.input_layer, self.n_units, self.output_layer))
        self.gpu = gpu
        self.__set_cpu_or_gpu()
        self.emotion = {0: "Anger", 1: "Happiness", 2: "Excited", 3: "Sadness",
                        4: "Frustration", 5: "Fear", 6: "Surprise", 7: "Other",
                        8: "Neutral state", 9: "Disgust"}
        # Init/Resume
        serializers.load_hdf5(initmodel, self.model)
Project: instance_normalization_chainer    Author: crcrpar    | Project source | File source
def main(gpu_id=-1, bs=32, epoch=20, out='./result', resume=''):
    net = ShallowConv()
    model = L.Classifier(net)
    if gpu_id >= 0:
        chainer.cuda.get_device_from_id(gpu_id).use()  # make the target GPU current before to_gpu()
        model.to_gpu()
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, bs)
    test_iter = chainer.iterators.SerialIterator(
        test, bs, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.ParameterStatistics(model.predictor))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))
    trainer.extend(extensions.LogReport(log_name='parameter_statistics'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    if resume:
        chainer.serializers.load_npz(resume, trainer)

    trainer.run()
Project: instance_normalization_chainer    Author: crcrpar    | Project source | File source
def main(gpu_id=-1, bs=32, epoch=20, out='./not_layer_result', resume=''):
    net = ShallowConv()
    model = L.Classifier(net)
    if gpu_id >= 0:
        chainer.cuda.get_device_from_id(gpu_id).use()  # make the target GPU current before to_gpu()
        model.to_gpu()
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, bs)
    test_iter = chainer.iterators.SerialIterator(test, bs, repeat=False,
                                                 shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=gpu_id)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.ParameterStatistics(model.predictor))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu_id))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    if resume:
        chainer.serializers.load_npz(resume, trainer)

    trainer.run()
Project: nfp    Author: pfnet    | Project source | File source
def __init__(self, d, batchsize, n_train_epoch, n_val_epoch, n_units):
        self.d = d
        self.batchsize = batchsize
        self.n_train_epoch = n_train_epoch
        self.n_val_epoch = n_val_epoch
        self.n_units = n_units
        self.model = L.Classifier(MLP(self.d, self.n_units, 2))
        self.model.o = optimizers.Adam()
        self.model.o.setup(self.model)
Project: nfp    Author: pfnet    | Project source | File source
def __init__(self, d, batchsize, n_train_epoch, n_val_epoch, n_units, gpu):
        self.d = d
        self.batchsize = batchsize
        self.n_train_epoch = n_train_epoch
        self.n_val_epoch = n_val_epoch
        self.n_units = n_units
        self.optimizer = optimizers.Adam()
        self.model = L.Classifier(MLP(self.d, self.n_units, 2))
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
Project: char-classify    Author: ekatek    | Project source | File source
def __init__(self, net_size, model_filename, optimizer_filename):
        """ Create the underlying neural network model """
        self.model = L.Classifier(BaseNetwork(net_size))
        if model_filename != "":
            serializers.load_hdf5(model_filename, self.model)

        """ Create the underlying optimizer """
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
        if optimizer_filename != "":
            serializers.load_hdf5(optimizer_filename, self.optimizer)
Project: chainermn    Author: chainer    | Project source | File source
def setup_mnist_trainer(self, display_log=False):
        batchsize = 100
        n_units = 100

        comm = self.communicator
        model = L.Classifier(MLP(n_units, 10))

        optimizer = chainermn.create_multi_node_optimizer(
            chainer.optimizers.Adam(), comm)
        optimizer.setup(model)

        if comm.rank == 0:
            train, test = chainer.datasets.get_mnist()
        else:
            train, test = None, None

        train = chainermn.scatter_dataset(train, comm, shuffle=True)
        test = chainermn.scatter_dataset(test, comm, shuffle=True)

        train_iter = chainer.iterators.SerialIterator(train, batchsize)
        test_iter = chainer.iterators.SerialIterator(test, batchsize,
                                                     repeat=False,
                                                     shuffle=False)

        updater = training.StandardUpdater(
            train_iter,
            optimizer
        )

        return updater, optimizer, train_iter, test_iter, model
Project: chainermn    Author: chainer    | Project source | File source
def check_cycle_model(gpu):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10

    if communicator.rank == 0:
        X = np.random.randn(n, d).astype(np.float32)
        Y = (np.random.rand(n) * 2).astype(np.int32)
        model = L.Classifier(
            Cycle0(d, communicator, rank_next, rank_prev))

        if gpu:
            model.to_gpu()
            X = chainer.cuda.to_gpu(X)
            Y = chainer.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward()
    else:
        model = Cycle1(
            d, communicator, rank_next, rank_prev)
        if gpu:
            model.to_gpu()

        for i in range(n):
            err = model()
            err.backward()
Project: chainermn    Author: chainer    | Project source | File source
def check_branching_model(gpu, communicator, rank_next, rank_prev,
                          parent_model):
    n, d = 100, 10
    X = np.random.randn(n, d).astype(np.float32)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    if communicator.rank == 0:
        rank_children = list(range(1, communicator.size))
        model = L.Classifier(parent_model(
            d, communicator, rank_children))
        if gpu:
            model.to_gpu()
            X = chainer.cuda.to_gpu(X)
            Y = chainer.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward()
    else:
        model = BranchChild(d, communicator, 0)
        if gpu:
            model.to_gpu()

        for i in range(n):
            err = model()
            err.backward()
Project: chainer-examples    Author: nocotan    | Project source | File source
def main():
    unit = 1000
    batchsize = 100
    epoch = 20

    model = L.Classifier(MLP(unit, 10))

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, batchsize)
    test_iter = chainer.iterators.SerialIterator(test, batchsize, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (epoch, 'epoch'), out='result')

    trainer.extend(extensions.Evaluator(test_iter, model))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar())

    trainer.run()
Project: NlpUtil    Author: trtd56    | Project source | File source
def __init__(self, net):
        self.model = L.Classifier(net)
Project: fontkaruta_classifier    Author: suga93    | Project source | File source
def get_model(model_name, n_classes):
    ''' initialize model '''
    if model_name == "SimpleCNN":
        model = L.Classifier(SimpleCNN(n_classes=n_classes), lossfun=F.softmax_cross_entropy)
    elif model_name == "MiddleCNN":
        model = L.Classifier(MiddleCNN(n_classes=n_classes), lossfun=F.softmax_cross_entropy)
    else:
        raise ValueError('Unknown model name: {}'.format(model_name))

    return model
Project: vsmlib    Author: undertherain    | Project source | File source
def main(options):

    # load the config params
    gpu = options['gpu']
    data_path = options['path_dataset']
    embeddings_path = options['path_vectors']
    n_epoch = options['epochs']
    batch_size = options['batchsize']
    test = options['test']
    embed_dim = options['embed_dim']
    freeze = options['freeze_embeddings']
    distance_embed_dim = options['distance_embed_dim']

    # load the data
    data_processor = DataProcessor(data_path)
    data_processor.prepare_dataset()
    train_data = data_processor.train_data
    test_data = data_processor.test_data

    vocab = data_processor.vocab
    cnn = CNN(n_vocab=len(vocab), input_channel=1,
              output_channel=100, n_label=19, embed_dim=embed_dim,
              position_dims=distance_embed_dim, freeze=freeze)
    cnn.load_embeddings(embeddings_path, data_processor.vocab)
    model = L.Classifier(cnn)

    # use GPU if flag is set
    if gpu >= 0:
        model.to_gpu()

    # setup the optimizer
    optimizer = O.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train_data, batch_size)
    test_iter = chainer.iterators.SerialIterator(test_data, batch_size, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, converter=convert.concat_examples, device=gpu)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'))

    # Evaluation
    test_model = model.copy()
    test_model.predictor.train = False
    trainer.extend(extensions.Evaluator(test_iter, test_model, device=gpu, converter=convert.concat_examples))


    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))


    trainer.run()
Project: vsmlib    Author: undertherain    | Project source | File source
def main(options):

    # load the config params
    gpu = options['gpu']
    data_path = options['path_dataset']
    embeddings_path = options['path_vectors']
    n_epoch = options['epochs']
    batchsize = options['batchsize']
    test = options['test']
    embed_dim = options['embed_dim']
    freeze = options['freeze_embeddings']

    # load the data
    data_processor = DataProcessor(data_path, test)
    data_processor.prepare_dataset()
    train_data = data_processor.train_data
    dev_data = data_processor.dev_data
    test_data = data_processor.test_data

    vocab = data_processor.vocab
    cnn = CNN(n_vocab=len(vocab), input_channel=1,
              output_channel=10, n_label=2, embed_dim=embed_dim, freeze=freeze)
    cnn.load_embeddings(embeddings_path, data_processor.vocab)
    model = L.Classifier(cnn)
    if gpu >= 0:
        model.to_gpu()

    # setup the optimizer
    optimizer = O.Adam()
    optimizer.setup(model)


    train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
    dev_iter = chainer.iterators.SerialIterator(dev_data, batchsize, repeat=False, shuffle=False)
    test_iter = chainer.iterators.SerialIterator(test_data, batchsize, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, converter=util.concat_examples, device=gpu)
    trainer = training.Trainer(updater, (n_epoch, 'epoch'))

    # Evaluation
    eval_model = model.copy()
    eval_model.predictor.train = False
    trainer.extend(extensions.Evaluator(dev_iter, eval_model, device=gpu, converter=util.concat_examples))

    test_model = model.copy()
    test_model.predictor.train = False

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))


    trainer.run()
Project: TOHO_AI    Author: re53min    | Project source | File source
def train(train_data, vocab, n_units=300, learning_rate_decay=0.97, seq_length=20, batch_size=20,
          epochs=20, learning_rate_decay_after=5):
    # Build the model: wrap the GRU language model in a Classifier
    model = L.Classifier(GRU(len(vocab), n_units))
    model.compute_accuracy = False  # only the loss matters for language modeling

    # Set up the optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.GradientClipping(5))  # clip gradients to keep training stable

    whole_len = train_data.shape[0]
    jump = whole_len // batch_size  # integer division, so range() below gets an int
    epoch = 0
    start_at = time.time()
    cur_at = start_at
    loss = 0
    plt_loss = []

    print('going to train {} iterations'.format(jump * epochs))
    for seq in range(jump * epochs):

        input_batch = np.array([train_data[(jump * j + seq) % whole_len]
                                for j in range(batch_size)])
        teach_batch = np.array([train_data[(jump * j + seq + 1) % whole_len]
                                for j in range(batch_size)])
        x = Variable(input_batch.astype(np.int32))
        teach = Variable(teach_batch.astype(np.int32))

        # Forward pass: accumulate the loss over the sequence
        loss += model(x, teach)

        # Truncated BPTT: update the parameters every seq_length steps
        if (seq + 1) % seq_length == 0:
            now = time.time()
            plt_loss.append(loss.data)
            print('{}/{}, train_loss = {}, time = {:.2f}'.format((seq + 1) // seq_length, jump,
                                                                 loss.data / seq_length, now - cur_at))
            # open('loss', 'w').write('{}\n'.format(loss.data / seq_length))
            cur_at = now

            model.cleargrads()
            loss.backward()
            loss.unchain_backward()
            optimizer.update()
            loss = 0

        # check point
        if (seq + 1) % 10000 == 0:
            with open('check_point', 'wb') as f:
                pickle.dump(copy.deepcopy(model).to_cpu(), f)

        if (seq + 1) % jump == 0:
            epoch += 1
            if epoch >= learning_rate_decay_after:
                # Adam exposes `lr` only as a derived, read-only property,
                # so decay its base step size `alpha` instead
                optimizer.alpha *= learning_rate_decay
                print('decayed learning rate by a factor {} to {}'.format(learning_rate_decay, optimizer.lr))

        sys.stdout.flush()

    with open('rnnlm_model', 'wb') as f:
        pickle.dump(copy.deepcopy(model).to_cpu(), f)
    plot_loss(plt_loss)