Python caffe module: TRAIN usage examples (source code)

We have extracted the following 29 code examples from open-source Python projects to illustrate how to use caffe.TRAIN.
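
caffe.TRAIN is the phase constant (paired with caffe.TEST) passed when constructing a caffe.Net or when tagging layers with an include phase, so that train-only behavior such as Dropout or phase-gated Data layers stays active. A minimal sketch, assuming a local train.prototxt and weights.caffemodel exist:

import caffe

caffe.set_mode_cpu()
# Instantiate the network in the TRAIN phase; dropout and TRAIN-only
# layers behave as they would during training.
net = caffe.Net('train.prototxt', 'weights.caffemodel', caffe.TRAIN)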

Project: pycaffe-yolo    Author: Zehaos    | project source | file source
def __init__(self, use_gpu=True, model=None):
        '''
        Init net.
        :param use_gpu: Run on GPU 0 if True, otherwise on the CPU.
        :param model: Network definition (path to a prototxt file).
        '''
        if not model:
            raise ValueError("model should not be empty!")
        print("Init NetTester: Use gpu: {}".format(use_gpu))
        print("Network: {}".format(model))
        if use_gpu:
            caffe.set_device(0)
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        self.__net = caffe.Net(model, caffe.TRAIN)
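
Note that the two-argument form caffe.Net(model, caffe.TRAIN) parses only the prototxt and initializes weights from their fillers; no .caffemodel is loaded. A hedged usage sketch (the prototxt path is hypothetical):

tester = NetTester(use_gpu=False, model='models/yolo_train.prototxt')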
Project: FCN-VOC2012-Training-Config    Author: voidrank    | project source | file source
def gen_net():
    caffe.set_device(1)
    caffe.set_mode_gpu()

    filename = '2007_000032.jpg'
    im = Image.open(filename)
    m = np.asarray(im, dtype=np.float32)
    m = m[:,:,::-1]
    m -= np.array((104.00698793,116.66876762,122.67891434))
    m = m.transpose((2, 0, 1))

    net = caffe.Net(
        "deploy.prototxt",
        #"train_iter_" + str(num) + ".caffemodel",
        #"/data/VGG16/caffemodel",
        "good.caffemodel",
        caffe.TRAIN)

    net.blobs["data"].reshape(1, *m.shape)
    net.blobs["data"].data[...] = m
    net.forward()
    return net
Project: FCN-VOC2012-Training-Config    Author: voidrank    | project source | file source
def gen_net(num):
    caffe.set_device(0)
    caffe.set_mode_gpu()

    filename = '2007_000032.jpg'
    im = Image.open(filename)
    m = np.asarray(im, dtype=np.float32)
    m = m[:,:,::-1]
    m -= np.array((104.00698793,116.66876762,122.67891434))
    m = m.transpose((2, 0, 1))

    net = caffe.Net(
        "train_val.prototxt",
        "train_iter_" + str(num) + ".caffemodel",
        # "/data/VGG16/caffemodel",
        # "../fcn-32s/good.caffemodel",
        caffe.TRAIN)

    net.blobs["data"].reshape(1, *m.shape)
    net.blobs["data"].data[...] = m
    net.forward()
    return net
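
After net.forward() all activations remain accessible through net.blobs, which is how these debugging nets are typically inspected. A short sketch (the iteration number and the output blob name 'score' are assumptions; check net.blobs.keys() for the real name):

net = gen_net(8000)
# Per-pixel class prediction from the final score blob (name assumed).
pred = net.blobs['score'].data[0].argmax(axis=0)
print(pred.shape)  # (H, W) label map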
Project: metaqnn    Author: bowenbaker    | project source | file source
def create_top_layer(self, phase=caffe.TRAIN, input_file="", train=True):
        if self.hp.GCN_APPROX:
            transform_param = {'scale': 0.0078125,'mean_value': 128}
        else:
            transform_param = {}
        if train:
            transform_param['mirror'] = self.hp.MIRROR
            if self.hp.CROP:
                # Adds random crops.
                transform_param['crop_size'] = self.hp.IMAGE_HEIGHT

        data, label = cl.Data(
            batch_size=self.hp.TRAIN_BATCH_SIZE if train else self.hp.EVAL_BATCH_SIZE, backend=P.Data.LMDB, name="data",
            source=input_file, ntop=2, include={'phase': phase}, transform_param=transform_param)
        return data, label

    # MAIN FUNCTION: Converts a net string to a caffe netspec.
    # Adds the data layer and accuracy layer for test/train.
Project: peters-stuff    Author: peterneher    | project source | file source
def add_batchnormscale(self, input, name):

        if True : # necessary?
            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': True }
            param = [dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name+'_bn', l)

            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': False }
            l = L.BatchNorm(input, name=name+'_bn', top=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name+'_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name+'_bn'), scale_param = { 'bias_term': True } )
            setattr(self.net_spec, name, l)
        else : # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l
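
The split above emits two BatchNorm layers that share parameters: the caffe.TEST copy runs with use_global_stats=True (accumulated moving averages), while the caffe.TRAIN copy writes into the same top using mini-batch statistics. To check the generated prototxt, the NetSpec can be serialized directly; a hedged sketch, assuming `spec` plays the role of self.net_spec:

import caffe

spec = caffe.NetSpec()
# ... layers added via add_batchnormscale(...) ...
with open('train_val.prototxt', 'w') as f:
    f.write(str(spec.to_proto()))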
Project: peters-stuff    Author: peterneher    | project source | file source
def add_batchnormscale(self, input, name):

        if True: # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else: # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l
Project: deep_share    Author: luyongxi    | project source | file source
def add_multilabel_data_layer(net, name, phase, num_classes, class_list=None):
    """ Add a MultiLabelData layer """
    include_dict = {'phase': phase}
    param = {'num_classes': num_classes}
    if phase == caffe.TRAIN:
        param['stage'] = 'TRAIN'
    elif phase == caffe.TEST:
        param['stage'] = 'VAL'
    if class_list is not None:
        assert len(class_list) == num_classes, \
            'Length of class list does not match number of classes {} vs {}'.\
            format(len(class_list), num_classes)
        param['class_list'] = class_list

    param_str = yaml.dump(param)
    net[name[0]], net[name[1]] = L.Python(name=name[0], python_param=dict(module='layers.multilabel_data', 
        layer='MultiLabelData', param_str=param_str), include=include_dict, ntop=2)
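
L.Python only references the layer by module and class name; the actual MultiLabelData implementation lives in the project's layers.multilabel_data module. For orientation, here is a minimal skeleton of the standard caffe.Layer interface such a data layer must implement (a hedged sketch, not the project's code; the top shapes are placeholders):

import yaml
import caffe

class MultiLabelData(caffe.Layer):
    def setup(self, bottom, top):
        # param_str carries the yaml dict built above.
        params = yaml.safe_load(self.param_str)
        self.num_classes = params['num_classes']

    def reshape(self, bottom, top):
        top[0].reshape(1, 3, 224, 224)        # data (shape assumed)
        top[1].reshape(1, self.num_classes)   # multilabel targets

    def forward(self, bottom, top):
        pass  # fill top[0].data / top[1].data with a batch here

    def backward(self, top, propagate_down, bottom):
        pass  # data layers propagate no gradients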
Project: deep_share    Author: luyongxi    | project source | file source
def add_singlelabel_data_layer(net, name, phase, num_classes, class_list=None):
    """ Add a MultiLabelData layer """
    include_dict = {'phase': phase}
    param = {'num_classes': num_classes}
    if phase == caffe.TRAIN:
        param['stage'] = 'TRAIN'
    elif phase == caffe.TEST:
        param['stage'] = 'VAL'
    if class_list is not None:
        assert len(class_list) == num_classes, \
            'Length of class list does not match number of classes {} vs {}'.\
            format(len(class_list), num_classes)
        param['class_list'] = class_list

    param_str = yaml.dump(param)
    net[name[0]], net[name[1]] = L.Python(name=name[0], python_param=dict(module='layers.singlelabel_data', 
        layer='SingleLabelData', param_str=param_str), include=include_dict, ntop=2)
Project: deep_share    Author: luyongxi    | project source | file source
def add_loss(self, net, task_layer_list, deploy):
        """ Add the loss layers """        
        # concatenate layers in the order specified by task_layer_list, compute the sigmoid
        lh.add_concat(net, bottom=task_layer_list, name='score'+self.postfix, axis=1)
        lh.add_sigmoid(net, bottom=net['score'+self.postfix], name='prob'+self.postfix, in_place=False)
        if not deploy:
            if self.loss_layer == 'Sigmoid':
                lh.add_sigmoid_entropy_loss(net, bottom=[net['score'+self.postfix], net[self.label_names]], 
                    name='loss'+self.postfix, loss_weight=1.0, phase=caffe.TRAIN)
            elif self.loss_layer == 'Square':
                lh.add_euclidean_loss(net, bottom=[net['prob'+self.postfix], net[self.label_names]], 
                    name='loss'+self.postfix, loss_weight=1.0, phase=caffe.TRAIN)
            else:
                raise ValueError('The layer type {} is not recognized!'.format(self.loss_layer))

            lh.add_multilabel_err_layer(net, bottom=[net['prob'+self.postfix], net[self.label_names]], 
                name='error'+self.postfix)
Project: DeepTextSpotter    Author: MichalBusta    | project source | file source
def create_models(buckets = [25, 50, 100], phase = caffe.TRAIN):

  transformers = create_spatial_transformers(buckets, phase)
  proposal_net = create_yolo(phase)
  recog = create_recognizer(phase)

  return proposal_net, transformers, recog
Project: metaqnn    Author: bowenbaker    | project source | file source
def convert(self, net_string):
        net_list = cnn.parse('net', net_string)
        net_list = StateStringUtils(self.ssp).convert_model_string_to_states(net_list)[1:]
        data, label = self.create_top_layer(caffe.TRAIN, self.hp.TRAIN_FILE, train=True)
        data1, label1 = self.create_top_layer(caffe.TEST, self.hp.VAL_FILE, train=False)
        loss, acc = self.unpack_list(net_list, data, label)
        lls = [data, data1, acc, loss]

        cc = to_proto(*lls)
        cc = self.replace_top_names(cc)
        return cc

    # Iterate over token list from parser.
Project: automatic-portrait-tf    Author: Corea    | project source | file source
def main():
    net = caffe.Net(MODEL_DEF, MODEL_WEIGHT, caffe.TRAIN)

    mat = []
    for i in range(len(net.layers)):
        mat_type = net.layers[i].type
        mat_data = []
        for j in range(len(net.layers[i].blobs)):
            mat_data.append(net.layers[i].blobs[j].data)
        mat.append((mat_type, mat_data))

    dt = np.dtype([('type', np.str_, 16), ('data', np.ndarray)])
    results = np.array(mat, dtype=dt)
    results.dump(MAT_RESULT)
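
Because np.ndarray.dump pickles the object array, reading MAT_RESULT back requires allow_pickle (mandatory since NumPy 1.16.4):

import numpy as np

results = np.load(MAT_RESULT, allow_pickle=True)
layer_type, layer_blobs = results[0]['type'], results[0]['data']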
Project: peters-stuff    Author: peterneher    | project source | file source
def print_network_sizes(self, model_file):

        net = caffe.Net(model_file, caffe.TRAIN)
        for k, v in net.blobs.items():
            print k, v.data.shape
Project: peters-stuff    Author: peterneher    | project source | file source
def print_network_sizes(self, model_file):

        net = caffe.Net(model_file, caffe.TRAIN)
        for k, v in net.blobs.items():
            print k, v.data.shape
Project: peters-stuff    Author: peterneher    | project source | file source
def print_network_sizes(model_file) :
    net = caffe.Net(model_file, caffe.TRAIN)
    for k, v in net.blobs.items():
        print k, v.data.shape
Project: Fabrik    Author: Cloud-CV    | project source | file source
def test_caffe_import(self):
        # Test 1
        data, label = L.ImageData(source='/dummy/source/', batch_size=32, ntop=2, rand_skip=0,
                                  shuffle=False, new_height=256, new_width=256, is_color=False,
                                  root_folder='/dummy/folder/',
                                  transform_param=dict(crop_size=227, mean_value=[104, 117, 123],
                                                       mirror=True, force_color=False,
                                                       force_gray=False))
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
            f.write(str(to_proto(data, label)))
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
        response = self.client.post(reverse('caffe-import'), {'file': sample_file})
        response = json.loads(response.content)
        os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
        self.assertGreaterEqual(len(response['net']['l0']['params']), 13)
        self.assertEqual(response['result'], 'success')
        # Test 2
        data, label = L.ImageData(source='/dummy/source/', batch_size=32, ntop=2, rand_skip=0,
                                  shuffle=False, new_height=256, new_width=256, is_color=False,
                                  root_folder='/dummy/folder/', include=dict(phase=caffe.TRAIN),
                                  transform_param=dict(crop_size=227, mean_file='/path/to/file',
                                                       mirror=True, force_color=False,
                                                       force_gray=False))
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
            f.write(str(to_proto(data, label)))
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
        response = self.client.post(reverse('caffe-import'), {'file': sample_file})
        response = json.loads(response.content)
        os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
        self.assertGreaterEqual(len(response['net']['l0']['params']), 13)
        self.assertEqual(response['result'], 'success')
Project: score-zeroshot    Author: pedro-morgado    | project source | file source
def _semantic_regularization(self, xSemPr, xSemLb, semReg):
        ns = self.netspec

        if self.semantics == ATTRIBUTES:
            name = 'SCoRe/semLoss'
            ns[name] = L.SigmoidCrossEntropyLoss(*[xSemPr, xSemLb], name=name,
                                                 loss_weight=semReg/(len(self.constrains)*np.sqrt(2.))*10.,
                                                 include=dict(phase=caffe.TRAIN))
        else:
            c_keys = [key for key in self.constrains.keys()]
            losses = ['SCoRe/semLoss/%s' % key for key in c_keys]
            scores = ['SCoRe/semLoss/%s/scores' % key for key in c_keys]
            labels = ['SCoRe/semLoss/%s/labels' % key for key in c_keys]

            # Slice semantic scores
            xSemPr_name = [k for k, v in ns.tops.iteritems() if v == xSemPr][0]
            slice_scores = L.Slice(name='SCoRe/semLoss/slice_scores', bottom=[xSemPr_name], ntop=len(scores), top=scores, in_place=True,
                                   slice_point=np.cumsum(self.num_states)[:-1].tolist(),
                                   include=dict(phase=caffe.TRAIN))

            # Slice semantic labels
            xSemLb_name = [k for k, v in ns.tops.iteritems() if v == xSemLb][0]
            slice_labels = L.Slice(name='SCoRe/semLoss/slice_labels', bottom=[xSemLb_name], ntop=len(labels), top=labels, in_place=True,
                                   slice_point=range(1, len(self.constrains)),
                                   include=dict(phase=caffe.TRAIN))

            # Add supervision to each slice
            for i, xLoss in enumerate(losses):
                ns[xLoss] = L.SoftmaxWithLoss(*[slice_scores[i], slice_labels[i]], name=xLoss, loss_weight=semReg/len(self.constrains),
                                              include=dict(phase=caffe.TRAIN))

            # Summarize supervisions for display
            ns['SCoRe/semLoss'] = L.Eltwise(*[ns[l] for l in losses], name='SCoRe/semLoss',
                                            operation=P.Eltwise.SUM, coeff=[semReg/len(self.constrains)]*len(losses),
                                            include=dict(phase=caffe.TRAIN))
Project: score-zeroshot    Author: pedro-morgado    | project source | file source
def _code_regularization(self, lCW):
        ns = self.netspec

        # Semantic codes. Needs to be initialized.
        code_shape = [sum(self.code_dim), len(self.train_classes) if self.semantics == ATTRIBUTES else sum(self.num_states)]

        name = 'SCoRe/cwReg/codewords'
        sem_cw = ns[name] = L.DummyData(name=name, shape=dict(dim=code_shape), include=dict(phase=caffe.TRAIN))

        # Classification codes.
        name = 'SCoRe/cwReg/eye'
        x = ns[name] = L.DummyData(name=name, shape=dict(dim=[code_shape[0], code_shape[0]]), include=dict(phase=caffe.TRAIN))

        name = 'SCoRe/cwReg/cls_codewords'
        clf_cw = ns[name] = L.InnerProduct(x, name=name, num_output=code_shape[1], bias_term=False,
                                           param=[{'name': lCW}], include=dict(phase=caffe.TRAIN))

        # Compute \sum |S-C|^2
        name = 'SCoRe/cwReg/diff'
        x_diff = ns[name] = L.Eltwise(*[sem_cw, clf_cw], name=name,
                                      operation=P.Eltwise.SUM, coeff=[1., -1.], include=dict(phase=caffe.TRAIN))

        name = 'SCoRe/cwReg'
        ns[name] = L.Reduction(x_diff, name=name,
                               operation=P.Reduction.SUMSQ, axis=0,
                               loss_weight=self.code_coeff, include=dict(phase=caffe.TRAIN))
Project: score-zeroshot    Author: pedro-morgado    | project source | file source
def _loss_proto(self, xPr, xLb, xSemPr, xSemLb, lCW):
        ns = self.netspec

        # Classification loss
        if self.sem_coeff < 1:
            name = 'SCoRe/objLoss'
            ns[name] = L.SoftmaxWithLoss(*[xPr, xLb], name=name, loss_weight=1.0 - self.sem_coeff, include=dict(phase=caffe.TRAIN))

        # Semantic regularization
        if self.sem_coeff > 0:
            self._semantic_regularization(xSemPr, xSemLb, self.sem_coeff)

        # Codeword regularization
        if 0 < self.code_coeff < np.inf:
            self._code_regularization(lCW)
Project: score-zeroshot    Author: pedro-morgado    | project source | file source
def generate_train_proto(self, model_fn, fts_lmdb, sem_lmdb, batch_size):
        ns = self._new_model()

        # Inputs
        mean = [104., 116., 122.]
        stage = {'testRecg': 'TestRecognition',
                 'testZS': 'TestZeroShot'}
        for subset in ['train', 'testRecg', 'testZS']:
            if subset == 'train':
                include = {'phase': caffe.TRAIN}
            else:
                include = {'phase': caffe.TEST, 'stage': stage[subset]}
            ns[subset+'_data'], ns[subset+'_labels'] = L.Data(name='data', ntop=2, top=['data', 'labels'], in_place=True,
                                                              source=fts_lmdb[subset], batch_size=batch_size, backend=P.Data.LMDB,
                                                              transform_param=dict(mirror=True if subset == 'train' else False,
                                                                                   crop_size=self.base_cnn.input_size,
                                                                                   mean_value=mean),
                                                              include=include)

        # Semantic labels for training
        if self.sem_coeff > 0:
            ns.semantics = L.Data(name='semantics',
                                  source=sem_lmdb['train'], batch_size=batch_size, backend=P.Data.LMDB,
                                  include=dict(phase=caffe.TRAIN))

        # Run base CNN
        xFt = self.base_cnn.inference_proto(ns.train_data, mult=1., truncate_at=self.feat_layer)

        # Run score
        xObj, xSem, lCW = self._score_proto(xFt, source_net=True, target_net=self.test_classes is not None, mult=1.0)
        self.scores = {'obj': xObj, 'semantics': xSem}

        # Loss
        self._loss_proto(ns[xObj], ns.train_labels, ns[xSem], ns.semantics if self.sem_coeff > 0 else None, lCW)

        # Evaluation
        self._eval_proto(ns[xObj], ns.train_labels)

        with open(model_fn, 'w') as f:
            f.write(str(ns.to_proto()))
Project: deepwater-nae    Author: h2oai    | project source | file source
def solver_graph(self):
        proto = caffe_pb2.SolverParameter()
        proto.type = self.cmd.solver_type
        if self.device is not None:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'GPU')
            proto.device_id = self.device
        else:
            proto.solver_mode = caffe_pb2.SolverParameter.SolverMode.Value(
                'CPU')
        proto.lr_policy = 'fixed'
        proto.base_lr = self.cmd.learning_rate
        proto.momentum = self.cmd.momentum
        proto.max_iter = int(2e9)
        proto.random_seed = self.cmd.random_seed + self.rank
        print('Setting seed ', proto.random_seed, file = sys.stderr)
        proto.display = 1

        batch = int(solver.cmd.input_shape[0] / solver.size)
        if self.cmd.graph:
            dir = os.path.dirname(os.path.realpath(__file__))
            proto.net = dir + '/' + self.cmd.graph + '.prototxt'
        else:
            proto.train_net_param.MergeFrom(self.net_def(caffe.TRAIN))
            proto.test_net_param.add().MergeFrom(self.net_def(caffe.TEST))

        proto.test_iter.append(1)
        proto.test_interval = 999999999  # cannot disable or set to 0
        proto.test_initialization = False
        return proto
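
The assembled SolverParameter is ordinary protobuf, so str() yields prototxt text that caffe.get_solver can consume. A hedged usage sketch (`sg` stands for an instance of the class above; the name is hypothetical):

import caffe

proto = sg.solver_graph()
with open('solver.prototxt', 'w') as f:
    f.write(str(proto))
solver = caffe.get_solver('solver.prototxt')
solver.step(1)  # run a single training iteration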
Project: deepwater-nae    Author: h2oai    | project source | file source
def reshape(self, bottom, top):
        batch = int(solver.cmd.input_shape[0] / solver.size)
        input_shape = [batch,
                       solver.cmd.input_shape[1],
                       solver.cmd.input_shape[2],
                       solver.cmd.input_shape[3], ]
        top[0].reshape(*input_shape)
        print('top[0] shape ', list(top[0].shape), file = sys.stderr)
        if self.phase == caffe.TRAIN:
            top[1].reshape(batch, 1)
            print('top[1] shape ', list(top[1].shape), file = sys.stderr)
Project: deep_share    Author: luyongxi    | project source | file source
def add_input(self, net, deploy=False):
        """ add input layers """
        class_list = self.class_list
        num_classes = len(class_list)

        if not deploy:
            train_net = net['train']
            val_net = net['val']
            lh.add_multilabel_data_layer(train_net, name=[self.data_name, self.label_names], 
                phase=caffe.TRAIN, num_classes=num_classes, class_list=class_list)
            lh.add_multilabel_data_layer(val_net, name=[self.data_name, self.label_names], 
                phase=caffe.TEST, num_classes=num_classes, class_list=class_list)
Project: deep_share    Author: luyongxi    | project source | file source
def add_input(self, net, deploy=False):
        """ add input layers """
        class_list = self.class_list
        num_classes = len(class_list)

        if not deploy:
            train_net = net['train']
            val_net = net['val']
            lh.add_singlelabel_data_layer(train_net, name=[self.data_name, self.label_names], 
                phase=caffe.TRAIN, num_classes=num_classes, class_list=class_list)
            lh.add_singlelabel_data_layer(val_net, name=[self.data_name, self.label_names], 
                phase=caffe.TEST, num_classes=num_classes, class_list=class_list)
Project: ifp    Author: morris-frank    | project source | file source
def pre_transplant(inp_proto, inp_model, fcn_proto, fcn_model):
    state = caffe.TRAIN
    net = caffe.Net(inp_proto, inp_model, state)

    new_net = caffe.Net(fcn_proto, inp_model, state)

    new_net = transplant(new_net, net, '-deconv')
    new_net.save(fcn_model)
Project: ifp    Author: morris-frank    | project source | file source
def perform_surgery(inp_proto, inp_model, fcn_proto, fcn_model):
    state = caffe.TRAIN

    # Load the original network and extract the fully connected layers'
    # parameters.
    net = caffe.Net(inp_proto, inp_model, state)
    params = ['fc6', 'fc7_', 'fc8_output']

    #net.blobs['data'].reshape(1, 3, 67, 67)
    # net.reshape()

    # fc_params = {name: (weights, biases)}
    fc_params = {pr: (net.params[pr][0].data, net.params[
                      pr][1].data) for pr in params}
    for fc in params:
        print '{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape)

    # Load the fully convolutional network to transplant the parameters.
    net_full_conv = caffe.Net(fcn_proto, inp_model, state)
    params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-score']

    # conv_params = {name: (weights, biases)}
    conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[
                        pr][1].data) for pr in params_full_conv}
    for conv in params_full_conv:
        print '{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape)

    for pr, pr_conv in zip(params, params_full_conv):
        print '{} = {}'.format(pr_conv, pr)
        conv_params[pr_conv][0].flat = fc_params[
            pr][0].flat  # flat unrolls the arrays
        conv_params[pr_conv][1][...] = fc_params[pr][1]

    print 'Finished unrolling.....'

    if not os.path.exists('/'.join(fcn_model.split('/')[:-1])):
        os.makedirs('/'.join(fcn_model.split('/')[:-1]))

    net_full_conv.save(fcn_model)
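
The .flat assignment works because the fully connected weights and their convolutional counterparts hold the same number of elements in the same order; only the shape differs. A small numpy illustration with VGG-style fc6 dimensions (sizes assumed):

import numpy as np

fc6_w = np.random.randn(4096, 25088).astype(np.float32)   # fc6 weights
conv_w = np.zeros((4096, 512, 7, 7), dtype=np.float32)    # fc6-conv weights
conv_w.flat = fc6_w.flat   # 512 * 7 * 7 == 25088: same data, new shape
assert conv_w.size == fc6_w.size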
Project: activitynet-essentials    Author: alex-paterson    | project source | file source
def __init__(self, net_path, model_path, mean_path):
        caffe.set_device(0)
        caffe.set_mode_gpu()
        self.net = caffe.Net(net_path, model_path, caffe.TRAIN)

        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2,0,1))
        self.transformer.set_mean('data', np.load(mean_path).mean(1).mean(1)) # mean pixel
        self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB

        self.net.blobs['data'].reshape(1,3,227,227)
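
With the Transformer configured, single-image inference inside the same class could look like this (a hedged sketch of a companion method, not part of the original project):

def predict(self, image_path):
    img = caffe.io.load_image(image_path)  # HxWx3, RGB, values in [0, 1]
    self.net.blobs['data'].data[...] = self.transformer.preprocess('data', img)
    out = self.net.forward()
    return out[self.net.outputs[0]][0].argmax()  # top-1 class index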
Project: caffe-wrn-generator    Author: razorx89    | project source | file source
def __create_data_layer(self, n, deploy):
        if deploy:
            n.data = L.Input(input_param={
                'shape': [
                    {
                        'dim': [
                            self.__config['deploy-batch-size'],
                            self.__config['deploy-num-channels'],
                            self.__config['crop-size'],
                            self.__config['crop-size']
                        ]
                    }
                ]
            })
        else:
            transform_param = {
                'mirror': True,
                'crop_size': self.__config['crop-size'],
                'scale': 0.00390625
            }
            if self.__config['mean-file']:
                transform_param['mean_file'] = self.__config['mean-file']

            n.data, n.label = L.Data(batch_size=self.__config['train-batch-size'],
                                     include={'phase': caffe.TRAIN},
                                     backend=P.Data.LMDB,
                                     source=self.__config['train-database'],
                                     ntop=2,
                                     transform_param=transform_param)

            transform_param = dict.copy(transform_param)
            transform_param['mirror'] = False
            n.data__test, n.label__test = L.Data(batch_size=self.__config['test-batch-size'],
                                                 include={'phase': caffe.TEST},
                                                 backend=P.Data.LMDB,
                                                 source=self.__config['test-database'],
                                                 ntop=2,
                                                 transform_param=transform_param)
        return n.data
Project: deepwater-nae    Author: h2oai    | project source | file source
def net_def(self, phase):
        print('sizes', self.cmd.sizes, file = sys.stderr)
        print('types', self.cmd.types, file = sys.stderr)
        if len(self.cmd.sizes) != len(self.cmd.types):
            raise ValueError('sizes and types must have the same length')

        n = caffe.NetSpec()
        name = ''

        for i in range(len(self.cmd.types)):
            if self.cmd.types[i] == 'data':
                name = 'data'
                if phase == caffe.TRAIN:
                    n[name], n.label = L.Python(
                        module = 'solver',
                        layer = 'DataLayer',
                        ntop = 2,
                    )
                else:
                    n[name] = L.Python(
                        module = 'solver',
                        layer = 'DataLayer',
                    )

            else:
                fc = L.InnerProduct(
                    n[name],
                    inner_product_param = {'num_output': self.cmd.sizes[i],
                                           'weight_filler': {'type': 'xavier',
                                                             'std': 0.1},
                                           'bias_filler': {'type': 'constant',
                                                           'value': 0}})
                name = 'fc%d' % i
                n[name] = fc

                if self.cmd.types[i] == 'relu':
                    relu = L.ReLU(n[name], in_place = True)
                    name = 'relu%d' % i
                    n[name] = relu
                elif self.cmd.types[i] == 'loss':
                    if self.cmd.regression:
                        if phase == caffe.TRAIN:
                            n.loss = L.EuclideanLoss(n[name], n.label)
                    else:
                        if phase == caffe.TRAIN:
                            n.loss = L.SoftmaxWithLoss(n[name], n.label)
                        else:
                            n.output = L.Softmax(n[name])
                else:
                    raise Exception('TODO unsupported: ' + self.cmd.types[i])

        return n.to_proto()