Python caffe module: caffe.TEST example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how caffe.TEST is used.
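
Almost every snippet below follows the same pattern: set the compute mode, load a deploy prototxt together with trained weights in the test phase (caffe.TEST disables training-only behaviour such as dropout), and preprocess input through a caffe.io.Transformer before calling forward(). A minimal sketch of that shared pattern follows; the file names, image path, and BGR mean values are placeholders rather than values taken from any particular project:

import numpy as np
import caffe

caffe.set_mode_cpu()  # or: caffe.set_mode_gpu(); caffe.set_device(0)

# Load the network definition and trained weights in test phase.
net = caffe.Net('deploy.prototxt', 'model.caffemodel', caffe.TEST)

# Typical preprocessing pipeline used by many of the examples below.
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))              # HxWxC -> CxHxW
transformer.set_mean('data', np.array([104, 117, 123]))   # per-channel BGR mean (placeholder values)
transformer.set_raw_scale('data', 255)                    # [0, 1] floats -> [0, 255]
transformer.set_channel_swap('data', (2, 1, 0))           # RGB -> BGR

image = caffe.io.load_image('example.jpg')                # placeholder image path
net.blobs['data'].data[0] = transformer.preprocess('data', image)
output = net.forward()

The variations among the examples mostly concern where the model files come from, whether the GPU is used, and what is done with the loaded net: inference, weight inspection, pruning, or conversion to another framework.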

Project: pybot    Author: spillai
def __init__(self, model_file, weights_file, mean_file): 
        if not os.path.exists(model_file) or \
           not os.path.exists(weights_file) or \
           not os.path.exists(mean_file): 
            raise ValueError('Invalid model: {}, \nweights file: {}, \nmean file: {}'
                             .format(model_file, weights_file, mean_file))

        # Init caffe with model
        self.net_ = caffe.Net(model_file, weights_file, caffe.TEST)
        self.mean_file_ = mean_file
        self.input_shape_ = self.net_.blobs['data'].data.shape    

        # Initialize mean file
        blob_meanfile = caffe.proto.caffe_pb2.BlobProto()
        data_meanfile = open(mean_file , 'rb' ).read()
        blob_meanfile.ParseFromString(data_meanfile)
        meanfile = np.squeeze(np.array(caffe.io.blobproto_to_array(blob_meanfile)))
        self.meanfile_ = meanfile.transpose((1,2,0))
        self.meanfile_image_ = None
Project: pybot    Author: spillai
def __init__(self, rcnn_dir, net='vgg_cnn_m_1024'): 
        model_file = os.path.join(
            rcnn_dir, 'models', 
            FastRCNNDescription.NETS[net][0], 'test.prototxt')
        pretrained_file = os.path.join(
            rcnn_dir, 'data', 'fast_rcnn_models', 
            FastRCNNDescription.NETS[net][1])

        if not os.path.exists(model_file) or \
           not os.path.exists(pretrained_file): 
            raise ValueError('Unknown net {}, use one of {}, \n'
                             'model: {}, \npretrained file: {}'
                             .format(net, 
                                     FastRCNNDescription.NETS.keys(), 
                                     model_file, pretrained_file))

        # Init caffe with model
        cfg.TEST.BBOX_REG = False
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
Project: temporal-segment-networks    Author: yjxiong
def __init__(self, net_proto, net_weights, device_id, input_size=None):
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
        self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

        input_shape = self._net.blobs['data'].data.shape

        if input_size is not None:
            input_shape = input_shape[:2] + input_size

        transformer = caffe.io.Transformer({'data': input_shape})

        if self._net.blobs['data'].data.shape[1] == 3:
            transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
            transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
        else:
            pass # non RGB data need not use transformer

        self._transformer = transformer

        self._sample_shape = self._net.blobs['data'].data.shape
Project: Video-Classification-Action-Recognition    Author: qijiezhao
def __init__(self, net_proto, net_weights, device_id, input_size=None):
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
        self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

        input_shape = self._net.blobs['data'].data.shape

        if input_size is not None:
            input_shape = input_shape[:2] + input_size

        transformer = caffe.io.Transformer({'data': input_shape})

        if self._net.blobs['data'].data.shape[1] == 3:
            transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
            transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
        else:
            pass # non RGB data need not use transformer

        self._transformer = transformer

        self._sample_shape = self._net.blobs['data'].data.shape
Project: facade-segmentation    Author: jfemiani
def net(weights=WEIGHTS):
    """
    Get the caffe net that has been trained to segment facade features.

    This initializes or re-initializes the global network with weights. There are certainly side-effects!

    The weights default to a caffe model that is part of the same sourcecode repository as this file.
    They can be changed by setting the I12_WEIGHTS environment variable, by passing a command line argument
    to some programs, or programatically (of course).

    :param weights: The weights to use for the net.
    :return:
    """
    global WEIGHTS
    global _net
    if _net is None or weights != WEIGHTS:
        if weights is not None:
            WEIGHTS = weights
        _net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return _net
Project: MTCNN_face_detection_caffe    Author: LucyLu-LX
def __init__(self,
                 minsize = 20,
                 threshold = [0.6, 0.7, 0.7],
                 factor = 0.709,
                 fastresize = False,
                 gpuid = 0):

        self.minsize = minsize
        self.threshold = threshold
        self.factor = factor
        self.fastresize = fastresize

        model_P = './model/det1.prototxt'
        weights_P = './model/det1.caffemodel'
        model_R = './model/det2.prototxt'
        weights_R = './model/det2.caffemodel'
        model_O = './model/det3.prototxt'
        weights_O = './model/det3.caffemodel'

        caffe.set_mode_gpu()
        caffe.set_device(gpuid)

        self.PNet = caffe.Net(model_P, weights_P, caffe.TEST) 
        self.RNet = caffe.Net(model_R, weights_R, caffe.TEST)
        self.ONet = caffe.Net(model_O, weights_O, caffe.TEST)
Project: TPN    Author: myfavouritekk
def load_nets(args, cur_gpu):
    # initialize solver and feature net,
    # RNN should be initialized before CNN, because CNN cudnn conv layers
    # may assume using all available memory
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print "Restoring history from {}".format(args.snapshot)
        solver.restore(args.snapshot)
    rnn = solver.net
    if args.weights:
        rnn.copy_from(args.weights)
    feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)

    # apply bbox regression normalization on the net weights
    with open(args.bbox_mean, 'rb') as f:
        bbox_means = cPickle.load(f)
    with open(args.bbox_std, 'rb') as f:
        bbox_stds = cPickle.load(f)
    feature_net.params['bbox_pred_vid'][0].data[...] = \
        feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
    feature_net.params['bbox_pred_vid'][1].data[...] = \
        feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
    return solver, feature_net, rnn, bbox_means, bbox_stds
Project: TPN    Author: myfavouritekk
def load_models(args):

    # load rnn model
    caffe.set_mode_gpu()
    if args.gpus is None:
        caffe.set_device(args.job_id - 1)
    else:
        assert args.job_id <= len(args.gpus)
        caffe.set_device(args.gpus[args.job_id-1])
    if args.lstm_param is not '':
        rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
        print 'Loaded RNN network from {:s}.'.format(args.lstm_def)
    else:
        rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
        print 'WARNING: dummy RNN network created.'

    # load feature model
    feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
    print 'Loaded feature network from {:s}.'.format(args.def_file)

    return feature_net, rnn_net
Project: peters-stuff    Author: peterneher
def add_batchnormscale(self, input, name):

        if True : # necessary?
            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': True }
            param = [dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name+'_bn', l)

            batch_norm_param={'moving_average_fraction': 0.95, 'use_global_stats': False }
            l = L.BatchNorm(input, name=name+'_bn', top=name+'_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name+'_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name+'_bn'), scale_param = { 'bias_term': True } )
            setattr(self.net_spec, name, l)
        else : # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l
Project: peters-stuff    Author: peterneher
def add_batchnormscale(self, input, name):

        if True: # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else: # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l
Project: retrieval-2016-deepvision    Author: imatge-upc
def __init__(self,params):

        self.dimension = params['dimension']
        self.dataset = params['dataset']
        self.pooling = params['pooling']
        # Read image lists
        with open(params['query_list'],'r') as f:
            self.query_names = f.read().splitlines()

        with open(params['frame_list'],'r') as f:
            self.database_list = f.read().splitlines()

        # Parameters needed
        self.layer = params['layer']
        self.save_db_feats = params['database_feats']

        # Init network
        if params['gpu']:
            caffe.set_mode_gpu()
            caffe.set_device(0)
        else:
            caffe.set_mode_cpu()
        print "Extracting from:", params['net_proto']
        cfg.TEST.HAS_RPN = True
        self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
Project: Caffe-Python-Tutorial    Author: tostq
def eval_prune_threshold(threshold_list, test_prototxt, caffemodel, prune_layers):
    def net_prune(threshold, test_prototx, caffemodel, prune_layers):
        test_net = caffe.Net(test_prototx, caffemodel, caffe.TEST)
        return prune(threshold, test_net, prune_layers)

    accuracy = []
    for threshold in threshold_list:
        results = net_prune(threshold, test_prototxt, caffemodel, prune_layers)
        print 'threshold: ', results[0]
        print '\ntotal_percentage: ', results[1]
        print '\npercentage_list: ', results[2]
        print '\ntest_loss: ', results[3]
        print '\naccuracy: ', results[4]
        accuracy.append(results[4])
    plt.plot(accuracy,'r.')
    plt.show()

Project: blcf    Author: willard-yuan
def __init__(self, model_file, pretrained_file, mean_value=None,
        layer=['pool5'], input_size = None ):

        caffe.set_mode_gpu()
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)

        # get name input layer
        self.list_layers = layer
        self.mean_value = mean_value

        # set transformer object
        self.transformer = caffe.io.Transformer({'data': self.blobs['data'].data.shape})
        self.transformer.set_transpose( 'data', (2,0,1) )

        if mean_value is not None:
            self.transformer.set_mean('data', mean_value)

        self.transformer.set_raw_scale('data', 255)
        self.transformer.set_channel_swap('data', (2,1,0))


        if input_size is not None:
            #reshape the input
            print "New input! {}".format(input_size)
            self.reshape_input( input_size[0], input_size[1],  input_size[2], input_size[3]  )
Project: UVA    Author: chiachun
def set_caffe(cfg):
    model = cfg.VGG_model
    weights = cfg.VGG_weights
    if cfg.cpu_caffe:
        caffe.set_mode_cpu()
    net = caffe.Net(model, weights, caffe.TEST)

    # Set up transformer
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_mean('data',np.array([129.1863,104.7624,93.5940]))

    # BGR -> RGB
    transformer.set_channel_swap('data', (2,1,0))
    transformer.set_raw_scale('data',255)
    return net, transformer
Project: flownet2-tf    Author: sampepose
def main():
    # Create tempfile to hold prototxt
    tmp = tempfile.NamedTemporaryFile(mode='w', delete=True)

    # Parse prototxt and inject `vars`
    proto = open(arch['DEPLOY_PROTOTXT']).readlines()
    for line in proto:
        for key, value in vars.items():
            tag = "$%s$" % key
            line = line.replace(tag, str(value))
        tmp.write(line)
    tmp.flush()

    # Instantiate Caffe Model
    net = caffe.Net(tmp.name, arch['CAFFEMODEL'], caffe.TEST)

    out = {}
    for (caffe_param, tf_param) in arch['PARAMS'].items():
        # Caffe stores weights as (channels_out, channels_in, h, w)
        # but TF expects (h, w, channels_in, channels_out)
        out[tf_param + '/weights'] = net.params[caffe_param][0].data.transpose((2, 3, 1, 0))
        out[tf_param + '/biases'] = net.params[caffe_param][1].data

    np.save(FLAGS.out, out)
Project: TF-Examples    Author: CharlesShang
def load_caffe(img_p, layers=50):
    caffe.set_mode_cpu()

    prototxt = "data/ResNet-%d-deploy.prototxt" % layers
    caffemodel = "data/ResNet-%d-model.caffemodel" % layers
    # net = caffe.Net(prototxt, caffe.TEST)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    net.blobs['data'].data[0] = img_p.transpose((2, 0, 1))
    assert net.blobs['data'].data[0].shape == (3, 224, 224)
    net.forward()

    caffe_prob = net.blobs['prob'].data[0]
    print_prob(caffe_prob)

    return net


# returns the top1 string
Project: deep_share    Author: luyongxi
def add_multilabel_data_layer(net, name, phase, num_classes, class_list=None):
    """ Add a MultiLabelData layer """
    include_dict = {'phase': phase}
    param = {'num_classes': num_classes}
    if phase == caffe.TRAIN:
        param['stage'] = 'TRAIN'
    elif phase == caffe.TEST:
        param['stage'] = 'VAL'
    if class_list is not None:
        assert len(class_list) == num_classes, \
            'Length of class list does not match number of classes {} vs {}'.\
            format(len(class_list), num_classes)
        param['class_list'] = class_list

    param_str = yaml.dump(param)
    net[name[0]], net[name[1]] = L.Python(name=name[0], python_param=dict(module='layers.multilabel_data', 
        layer='MultiLabelData', param_str=param_str), include=include_dict, ntop=2)
Project: deep_share    Author: luyongxi
def add_singlelabel_data_layer(net, name, phase, num_classes, class_list=None):
    """ Add a MultiLabelData layer """
    include_dict = {'phase': phase}
    param = {'num_classes': num_classes}
    if phase == caffe.TRAIN:
        param['stage'] = 'TRAIN'
    elif phase == caffe.TEST:
        param['stage'] = 'VAL'
    if class_list is not None:
        assert len(class_list) == num_classes, \
            'Length of class list does not match number of classes {} vs {}'.\
            format(len(class_list), num_classes)
        param['class_list'] = class_list

    param_str = yaml.dump(param)
    net[name[0]], net[name[1]] = L.Python(name=name[0], python_param=dict(module='layers.singlelabel_data', 
        layer='SingleLabelData', param_str=param_str), include=include_dict, ntop=2)
Project: infantSeg    Author: ginobilinie
def printNetwork_weights(prototxt_filename, caffemodel_filename):
    '''
    For each CNN layer, print weight heatmap and weight histogram 
    '''
    net = caffe.Net(prototxt_filename,caffemodel_filename, caffe.TEST)
    for layerName in net.params: 
        # display the weights
        arr = net.params[layerName][0].data
        plt.clf()
        fig = plt.figure(figsize=(10,10))
        ax = fig.add_subplot(111)
        cax = ax.matshow(arr, interpolation='none')
        fig.colorbar(cax, orientation="horizontal")
        plt.savefig('{0}_weights_{1}.png'.format(caffemodel_filename, layerName), dpi=100, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
        plt.close()

        # weights histogram  
        plt.clf()
        plt.hist(arr.tolist(), bins=20)
        plt.savefig('{0}_weights_hist_{1}.png'.format(caffemodel_filename, layerName), dpi=100, format='png', bbox_inches='tight') # use format='svg' or 'pdf' for vectorial pictures
        plt.close()
Project: toothless    Author: ratt-ru
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    #if use_gpu:
    #    caffe.set_mode_gpu()
    caffe.set_mode_cpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)

# Transformer function to perform image transformation
Project: GTSRB-caffe-model    Author: magnusja
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Project: CNN_Visualization    Author: albioTQ
def __init__(self, model_path,
                 weights_path,
                 mean_path=None,
                 image_scale=255.0,
                 batch_size=1,
                 input_shape=(227, 227)):

        self.net = caffe.Net(model_path,      # defines the structure of the model
                        weights_path,  # contains the trained weights
                        caffe.TEST)     # use test mode (e.g., don't perform dropout)

        self.net.blobs['data'].reshape(batch_size, 3, input_shape[0], input_shape[1])
        self.net.blobs['prob'].reshape(batch_size, )

        self.mean_path = mean_path
        self.image_scale = image_scale
        self.batch_size = batch_size

        self.transformer = self.set_transformer()
Project: prepare-faces-zyf    Author: walkoncross
def get_aligner(caffe_model_path, use_more_stage=False):
    caffe.set_mode_gpu()
#    PNet = caffe.Net(caffe_model_path + "/det1.prototxt",
#                     caffe_model_path + "/det1.caffemodel", caffe.TEST)
    if use_more_stage:
        RNet = caffe.Net(caffe_model_path + "/det2.prototxt",
                         caffe_model_path + "/det2.caffemodel", caffe.TEST)
    else:
        RNet = None

    ONet = caffe.Net(caffe_model_path + "/det3.prototxt",
                     caffe_model_path + "/det3.caffemodel", caffe.TEST)

    LNet = caffe.Net(caffe_model_path + "/det4.prototxt",
                     caffe_model_path + "/det4.caffemodel", caffe.TEST)

#    return (PNet, RNet, ONet)
    return (RNet, ONet, LNet)
#    return (RNet, ONet, None)
Project: prepare-faces-zyf    Author: walkoncross
def get_aligner(caffe_model_path, use_more_stage=False):
    caffe.set_mode_gpu()
#    PNet = caffe.Net(caffe_model_path + "/det1.prototxt",
#                     caffe_model_path + "/det1.caffemodel", caffe.TEST)
    if use_more_stage:
        RNet = caffe.Net(caffe_model_path + "/det2.prototxt",
                         caffe_model_path + "/det2.caffemodel", caffe.TEST)
    else:
        RNet = None

    ONet = caffe.Net(caffe_model_path + "/det3.prototxt",
                     caffe_model_path + "/det3.caffemodel", caffe.TEST)

    LNet = caffe.Net(caffe_model_path + "/det4.prototxt",
                     caffe_model_path + "/det4.caffemodel", caffe.TEST)

#    return (PNet, RNet, ONet)
    return (RNet, ONet, LNet)
#    return (RNet, ONet, None)
Project: Cuppa    Author: flipkart-incubator
def __load_caffe_model(self, blob_path, prototxt_path):
        """
        Load caffe model to memory
        Args:
            blob_path: Model in HDF5 format
            prototxt_path: Prototxt file. Contains Network implementation

        Returns:

        """
        net = caffe.Net(prototxt_path, blob_path, caffe.TEST)
        input_layer = net.inputs[0]
        output_layer = net.outputs[0]
        #height = net.blobs["data_q"].data.shape[2]
        #width = net.blobs["data_q"].data.shape[3]
        height = net.blobs[input_layer].data.shape[2]
        width = net.blobs[input_layer].data.shape[3]
        self.logger.info("Model has been successfully loaded from Blob:" + blob_path + " , Prototxt:" + prototxt_path)
        return net, height, width
Project: Barebones-Flask-and-Caffe-Classifier    Author: alex-paterson
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Project: ifp    Author: morris-frank
def main(argv):
    sport = 'long_jump'
    model = 'snap_iter_50000.caffemodel'
    #---
    weights = model_root + 'fcn/' + sport + '/' + model
    netf = './fcn/' + sport + '/deploy.prototxt'

    gpu = 0
    caffe.set_device(gpu)
    caffe.set_mode_gpu()

    net = caffe.Net(netf, weights, caffe.TEST)
    im_head = '/export/home/mfrank/data/OlympicSports/clips/'
    im_head = '/export/home/mfrank/data/OlympicSports/patches/'
    test_path_file = 'fcn/' + sport + '/test.txt'
    train_path_file = 'fcn/' + sport + '/train.txt'

    inferfile(net, train_path_file, im_head)
    ifp_morris.apply_overlayfcn(train_path_file, factor=4)

    inferfile(net, test_path_file, im_head)
    ifp_morris.apply_overlayfcn(test_path_file, factor=4)
Project: caffeNetViewer    Author: birolkuyumcu
def loadLayers(self):
        mname = str(self.ui.comboBoxModel.currentText())
        wname = str(self.ui.comboBoxWeights.currentText())
        self.net = caffe.Net(mname,wname , caffe.TEST)
        out = self.net.blobs
        self.layerList = out.keys()
        self.ui.comboBoxLayers.clear()
        for ln in self.layerList :
            self.ui.comboBoxLayers.addItem(ln)

        self.ui.plainTextEdit.clear()
        self.ui.plainTextEdit.appendPlainText('Caffe Model Loaded...')
        self.ui.plainTextEdit.appendPlainText('  Model Name : '+mname)
        self.ui.plainTextEdit.appendPlainText('  Weights Name : '+wname)

        self.ui.plainTextEdit.appendPlainText("Network Layers ...")

        for name, layer in zip(self.net._layer_names, self.net.layers):
            if not name in self.layerList :
                continue
            msg = "   "+name +" --> "+str(layer.type) +" --> "+ str((self.net.blobs[name].data[0]).shape)
            self.ui.plainTextEdit.appendPlainText(msg)      

        self.isModelLoaded = True
Project: jenova    Author: dungba88
def run(self, _, app_context):
        """run the action"""
        import caffe

        # init CPU/GPU mode
        cpu_mode = app_context.get_config('caffe.cpu_mode')
        if cpu_mode:
            caffe.set_mode_cpu()
        else:
            caffe.set_mode_gpu()
            caffe.set_device(0)

        # load test model
        test_model_file = "models/" + app_context.get_config('caffe.test_model')
        trained_data_file = "cache/data/" + app_context.get_config('caffe.trained_data')
        test_net = caffe.Net(test_model_file, trained_data_file, caffe.TEST)
        app_context.params['test_net'] = test_net

        logging.getLogger(__name__).info('Loaded neural network: ' + trained_data_file)
Project: mcv-m5    Author: david-vazquez
def load_caffe(path_prototxt='weights/resnetFCN.prototxt',
               path_weights='weights/resnetFCN.caffemodel',
               out_path='weights/resnetFCN.npy',
               version='V1'):

    # Load the caffe network
    print (' --> Loading the caffe weights...')
    net_caffe = caffe.Net(path_prototxt, path_weights, caffe.TEST)
    layers_caffe = dict(zip(list(net_caffe._layer_names), net_caffe.layers))

    # Convert weights
    print (' --> Converting the caffe weights to numpy...')
    weights_caffe = convert_weights(layers_caffe, v=version)

    # Save weights
    print (' --> Saving the weights in numpy...')
    np.save(out_path, weights_caffe)


# Entry point of the script
Project: cv-api    Author: yasunorikudo
def __init__(self):
        # load MS COCO labels
        labelmap_file = os.path.join(CAFFE_ROOT, LABEL_MAP)
        file = open(labelmap_file, 'r')
        self._labelmap = caffe_pb2.LabelMap()
        text_format.Merge(str(file.read()), self._labelmap)

        model_def = os.path.join(CAFFE_ROOT, PROTO_TXT)
        model_weights = os.path.join(CAFFE_ROOT, CAFFE_MODEL)

        self._net = caffe.Net(model_def, model_weights, caffe.TEST)
        self._transformer = caffe.io.Transformer(
            {'data': self._net.blobs['data'].data.shape})
        self._transformer.set_transpose('data', (2, 0, 1))
        self._transformer.set_mean('data', np.array([104, 117, 123]))
        self._transformer.set_raw_scale('data', 255)
        self._transformer.set_channel_swap('data', (2, 1, 0))

        # set net to batch size of 1
        image_resize = IMAGE_SIZE
        self._net.blobs['data'].reshape(1, 3, image_resize, image_resize)
Project: Style-Transfer-In-Tensorflow    Author: JiangQH
def _loadModel(self, model_dirs, id):
        print 'loading model...from{}'.format(model_dirs)
        model_file = osp.join(model_dirs, 'vgg16.prototxt')
        model_weights = osp.join(model_dirs, 'vgg16.caffemodel')
        mean_file = osp.join(model_dirs, 'vgg16_mean.npy')
        if id == -1:
            caffe.set_mode_cpu()
        else:
            caffe.set_mode_gpu()
            caffe.set_device(id)
        net = caffe.Net(model_file, model_weights, caffe.TEST)
        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))
        transformer.set_channel_swap('data', (2, 1, 0))
        transformer.set_transpose('data', (2, 0, 1))
        #transformer.set_raw_scale('data', 255)
        self.net = net
        self.transformer = transformer
        self.style_layers = VGG16_STYLES
        self.content_layers = VGG16_CONTENTS
        self.layers = VGG16_LAYERS
        print 'model loading done'
Project: SceneUnderstanding_CIARP_2017    Author: verlab
def __init__(self, model_path, deploy_path, mean, crop = 227, layer = 'fc7'):



        self.net = caffe.Net(deploy_path, model_path, caffe.TEST)
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})

        self.transformer.set_mean('data', mean)
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_channel_swap('data', (2, 1, 0))
        self.transformer.set_raw_scale('data', 255.0)
        self.crop = crop
        self.image = []
        self.layer = layer
        caffe.set_mode_gpu()

        print "Mean:", mean
Project: img_classifier_prepare    Author: zonekey
def __init__(self, deploy, pretrained, mean, labels, gpu = False):
        if gpu:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()    # in windows, only CPU mode supported

        self.__labels = self.load_labels(labels);
        mean_ar = self.convert(mean)

        if True:
            self.__net = caffe.Classifier(deploy, pretrained,
                    mean = mean_ar.mean(1).mean(1),
                    channel_swap = (2, 1, 0), 
                    raw_scale = 255,
                    image_dims = (256, 256))
        else: 
            self.__net = caffe.Net(deploy, pretrained, caffe.TEST)
            print self.__net.blobs['data'].data.shape    

            self.__transformer = caffe.io.Transformer({'data': self.__net.blobs['data'].data.shape})
            self.__transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
            self.__transformer.set_mean('data', mean_ar)
            self.__transformer.set_raw_scale('data', 255)
            self.__transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
Project: keras_zoo    Author: david-vazquez
def load_caffe(path_prototxt='weights/resnetFCN.prototxt',
               path_weights='weights/resnetFCN.caffemodel',
               out_path='weights/resnetFCN.npy',
               version='V1'):

    # Load the caffe network
    print (' --> Loading the caffe weights...')
    net_caffe = caffe.Net(path_prototxt, path_weights, caffe.TEST)
    layers_caffe = dict(zip(list(net_caffe._layer_names), net_caffe.layers))

    # Convert weights
    print (' --> Converting the caffe weights to numpy...')
    weights_caffe = convert_weights(layers_caffe, v=version)

    # Save weights
    print (' --> Saving the weights in numpy...')
    np.save(out_path, weights_caffe)


# Entry point of the script
Project: anet2016-cuhk    Author: yjxiong
def __init__(self, net_proto, net_weights, device_id, input_size=None):
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
        self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

        input_shape = self._net.blobs['data'].data.shape

        if input_size is not None:
            input_shape = input_shape[:2] + input_size

        transformer = caffe.io.Transformer({'data': input_shape})

        if self._net.blobs['data'].data.shape[1] == 3:
            transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
            transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
        else:
            pass # non RGB data need not use transformer

        self._transformer = transformer

        self._sample_shape = self._net.blobs['data'].data.shape
Project: squeezeDet-hand    Author: fyhtea
def dump_caffemodel_weights():
  net = caffe.Net(args.prototxt_path, args.caffemodel_path, caffe.TEST)
  weights = {}
  n_layers = len(net.layers)
  for i in range(n_layers):
    layer_name = net._layer_names[i]
    layer = net.layers[i]
    layer_blobs = [o.data for o in layer.blobs]
    weights[layer_name] = layer_blobs
  joblib.dump(weights, args.caffe_weights_path)
Project: pybot    Author: spillai
def __init__(self, rcnn_dir, with_rpn=True, net='zf', model_dir='pascal_voc', opt_dir='fast_rcnn_end2end'): 
        """
        net: vgg16, zf
        model_dir: [pascal_voc, coco]
        opt_dir: [fast_rcnn, fast_rcnn_alt_opt, fast_rcnn_end2end]
        """

        model_file = os.path.join(
            rcnn_dir, 'models', model_dir, 
            FasterRCNNDescription.NETS[net][0], 
            opt_dir, 'test.prototxt')
        pretrained_file = os.path.join(
            rcnn_dir, 'data', 'faster_rcnn_models', 
            FasterRCNNDescription.NETS[net][1])

        if not os.path.exists(model_file) or \
           not os.path.exists(pretrained_file): 
            raise ValueError('Unknown net {}, use one of {}, \n'
                             'model: {}, \npretrained file: {}'
                             .format(net, 
                                     FasterRCNNDescription.NETS.keys(), 
                                     model_file, pretrained_file))

        # Init caffe with model
        cfg.TEST.HAS_RPN = with_rpn
        cfg.TEST.BBOX_REG = False
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
Project: pybot    Author: spillai
def __init__(self, model_file, weights_file): 
        if not os.path.exists(model_file) or \
           not os.path.exists(weights_file): 
            raise ValueError('Invalid model: {}, \nweights file: {}'
                             .format(model_file, weights_file))

        # Init caffe with model
        self.net_ = caffe.Net(model_file, weights_file, caffe.TEST)
        self.input_shape_ = self.net_.blobs['data'].data.shape
Project: adversarial-frcnn    Author: xiaolonw
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.
    """

    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
Project: vqa-mcb    Author: akirafukui
def setup():
    global resnet_mean
    global resnet_net
    global vqa_net
    # data provider
    vqa_data_provider_layer.CURRENT_DATA_SHAPE = EXTRACT_LAYER_SIZE

    # mean substraction
    blob = caffe.proto.caffe_pb2.BlobProto()
    data = open( RESNET_MEAN_PATH , 'rb').read()
    blob.ParseFromString(data)
    resnet_mean = np.array( caffe.io.blobproto_to_array(blob)).astype(np.float32).reshape(3,224,224)
    resnet_mean = np.transpose(cv2.resize(np.transpose(resnet_mean,(1,2,0)), (448,448)),(2,0,1))

    # resnet
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()

    resnet_net = caffe.Net(RESNET_LARGE_PROTOTXT_PATH, RESNET_CAFFEMODEL_PATH, caffe.TEST)

    # our net
    vqa_net = caffe.Net(VQA_PROTOTXT_PATH, VQA_CAFFEMODEL_PATH, caffe.TEST)

    # uploads
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)

    if not os.path.exists(VIZ_FOLDER):
        os.makedirs(VIZ_FOLDER)

    print 'Finished setup'
Project: fast-image-retrieval    Author: xueeinstein
def feed_net(model_file, deploy_file, imagemean_file, image_files, show_pred):
    """feed network"""
    n_files = len(image_files)
    net = caffe.Net(deploy_file, model_file, caffe.TEST)

    # define transformer for preprocessing
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_mean('data', np.load(imagemean_file).mean(1).mean(1))
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', 255.0)

    net.blobs['data'].reshape(n_files, 3, 227, 227)

    idx = 0
    for image in image_files:
        try:
            im = caffe.io.load_image(image)
            transformed_im = transformer.preprocess('data', im)
            net.blobs['data'].data[idx, :, :, :] = transformed_im
            idx += 1
        except Exception:
            pass

    out = net.forward()
    if show_pred:
        print(out['prob'].argmax())
    return net
Project: faster-rcnn-resnet    Author: Eniac-Xie
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.
    """

    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
Project: deeplab_v1_tf1.0    Author: automan000
def main():
    """Extract and save network skeleton with the corresponding weights.

    Raises:
      ImportError: PyCaffe module is not found."""
    args = get_arguments()
    sys.path.append(args.pycaffe_path)
    try:
        import caffe
    except ImportError:
        raise
    # Load net definition.
    net = caffe.Net('./util/deploy.prototxt', args.caffemodel, caffe.TEST)

    # Check the existence of output_dir.
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Net skeleton with parameters names and shapes.
    # In TF, the filter shape is as follows: [ks, ks, input_channels, output_channels],
    # while in Caffe it looks like this: [output_channels, input_channels, ks, ks].
    net_skeleton = list() 
    for name, item in net.params.iteritems():
        net_skeleton.append([name + '/w', item[0].data.shape[::-1]]) # See the explanataion on filter formats above.
        net_skeleton.append([name + '/b', item[1].data.shape])

    with open(os.path.join(args.output_dir, 'net_skeleton.ckpt'), 'wb') as f:
        cPickle.dump(net_skeleton, f, protocol=cPickle.HIGHEST_PROTOCOL)

    # Net weights. 
    net_weights = dict()
    for name, item in net.params.iteritems():
        net_weights[name + '/w'] = item[0].data.transpose(2, 3, 1, 0) # See the explanation on filter formats above.
        net_weights[name + '/b'] = item[1].data
    with open(os.path.join(args.output_dir,'net_weights.ckpt'), 'wb') as f:
        cPickle.dump(net_weights, f, protocol=cPickle.HIGHEST_PROTOCOL)
    del net, net_skeleton, net_weights
Project: DeepTextSpotter    Author: MichalBusta
def create_models_tiny(phase = caffe.TEST):
  baseDir = os.path.dirname(os.path.abspath(__file__))
  proposal_net = caffe.Net('{0}/models/tiny.prototxt'.format(baseDir), '{0}/models/tiny.caffemodel'.format(baseDir), phase)
  recog = caffe.Net('{0}/models/model_cz.prototxt'.format(baseDir), '{0}/models/model.caffemodel'.format(baseDir), phase)

  return proposal_net, recog
Project: emu    Author: mlosch
def __init__(self, prototxt, caffemodel, mean, use_gpu=False):
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
        if use_gpu:
            caffe.set_mode_gpu()

        if type(mean) == str:
            if mean.endswith('.binaryproto'):
                self.mean = CaffeAdapter._load_binaryproto(mean)
            elif mean.endswith('.npy'):
                self.mean = np.load(mean)
            else:
                raise ValueError('Unknown mean file format. Known formats: .binaryproto, .npy')
        elif type(mean) == np.ndarray:
            self.mean = mean
        elif mean is not None:
            raise ValueError('Unknown mean format. Expected .binaryproto/.npy file or numpy array.')

        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        if self.mean is not None:
            self.transformer.set_mean('data', self.mean.mean(1).mean(1))
        else:
            print('Warning. No mean specified.')
        self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB

        self.layer_types = self._load_layer_types(prototxt)

        self.ready = False

        self.use_gpu = use_gpu
Project: tensorflow-action-conditional-video-prediction    Author: williamd4112
def __init__(self, mean, weight, K, num_act, num_step=1, data_path='test'):
        self.K = K
        self.num_act = num_act
        self.num_step = num_step

        caffe.set_mode_gpu()
        caffe.set_device(0)

        test_net_file, net_proto = N.create_netfile(1, data_path, mean, K, K,
            1, num_act, num_step=self.num_step, mode='test')

        self.test_net = caffe.Net(test_net_file, caffe.TEST)
        self.test_net.copy_from(weight)
Project: DQN    Author: Ivehui
def __init__(self, action_space, model=pms.newModel):
        self.action_space = action_space

        actionSolver = None
        actionSolver = caffe.get_solver(pms.actionSolverPath)
        actionSolver.net.copy_from(model)
        # test net share weights with train net
        actionSolver.test_nets[0].share_with(actionSolver.net)
        self.solver = actionSolver

        self.targetNet = caffe.Net(pms.actionTestNetPath, model, caffe.TEST)
Project: rl-attack-detection    Author: yenchenlin
def __init__(self, mean, weight, K, num_act, num_step=1, data_path='test'):
        self.K = K
        self.num_act = num_act
        self.num_step = num_step

        caffe.set_mode_gpu()
        caffe.set_device(0)

        test_net_file, net_proto = N.create_netfile(1, data_path, mean, K, K,
            1, num_act, num_step=self.num_step, mode='test')

        self.test_net = caffe.Net(test_net_file, caffe.TEST)
        self.test_net.copy_from(weight)
Project: py-faster-rcnn-tk1    Author: joeking11829
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.
    """

    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb, None)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})