Python caffe module: Classifier() example source code

The following 25 code examples, extracted from open-source Python projects, illustrate how to use caffe.Classifier().
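
Before the individual project snippets, here is a minimal, self-contained sketch of the typical caffe.Classifier() workflow. The model, mean and image paths below are placeholders, not taken from any of the projects:

import caffe
import numpy as np

caffe.set_mode_cpu()

# Placeholder paths to a deploy definition, trained weights and the ImageNet mean file.
net = caffe.Classifier('deploy.prototxt', 'weights.caffemodel',
                       mean=np.load('ilsvrc_2012_mean.npy').mean(1).mean(1),  # per-channel mean
                       channel_swap=(2, 1, 0),  # RGB -> BGR, as the reference models expect
                       raw_scale=255,           # rescale inputs from [0, 1] to [0, 255]
                       image_dims=(256, 256))   # resize inputs to 256x256 before cropping

image = caffe.io.load_image('example.jpg')      # H x W x 3 float array in [0, 1]
probabilities = net.predict([image], oversample=False)
print(probabilities[0].argmax())                # index of the most probable class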

Project: live-age-gender-estimator    Author: taipalma
def __init__(self, videoThread):

        threading.Thread.__init__(self)

        print "Initializing recognition thread..."
        self.videoThread = videoThread

        #caffe.set_mode_cpu()
        caffe.set_mode_gpu()
        caffe.set_device(0)

        # Model file and parameters are written by trainDnn.py  
        # Take the most recent parameter set

        genderPath = "./dcnn_gender"
        genderParamFiles = glob.glob(genderPath + os.sep + "*.caffemodel")
        genderParamFiles = sorted(genderParamFiles, key=lambda x:os.path.getctime(x))

        MODEL_FILE_GENDER = genderPath + os.sep + "deploy_gender.prototxt"
        PRETRAINED_GENDER = genderParamFiles[-1]
        MEAN_FILE_GENDER = genderPath + os.sep + "mean.binaryproto"

        proto_data = open(MEAN_FILE_GENDER, 'rb').read()
        a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        mean  = caffe.io.blobproto_to_array(a)[0]

        # Initialize net             
        self.gender_net = caffe.Classifier(MODEL_FILE_GENDER, PRETRAINED_GENDER, image_dims=(227,227),)
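
Note that the mean blob parsed above is never passed to caffe.Classifier() in this snippet. A hedged sketch of how the recognition thread might then apply the gender net to a face crop (face_img is a placeholder for an H x W x 3 float image in [0, 1]):

# Sketch only, not part of the project source.
probs = self.gender_net.predict([face_img], oversample=False)
predicted_gender = probs[0].argmax()   # class index; the label mapping comes from training
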
Project: live-age-gender-estimator    Author: taipalma
def __init__(self, videoThread):

        threading.Thread.__init__(self)

        print "Initializing age recognition thread..."
        self.videoThread = videoThread

        #caffe.set_mode_cpu()
        caffe.set_mode_gpu()

        # Model file and parameters are written by trainDnn.py   
        # Take the most recent parameter set       
        dcnnPath = "./dcnn_age"
        paramFiles = glob.glob(dcnnPath + os.sep + "*.caffemodel")
        paramFiles = sorted(paramFiles, key=lambda x:os.path.getctime(x))

        MODEL_FILE = dcnnPath + os.sep + "deploy.prototxt"
        PRETRAINED = paramFiles[-1]
        MEAN_FILE = dcnnPath + os.sep + "mean.binaryproto"

        blob = caffe.proto.caffe_pb2.BlobProto()
        with open(MEAN_FILE, 'rb') as f:
            data = f.read()

        blob.ParseFromString(data)
        # mean = np.array( caffe.io.blobproto_to_array(blob) ) [0]
        # Added simple mean
        mean = np.array([93.5940, 104.7624, 129.1863])

        # Initialize net             
        self.net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(224,224), mean=mean)
Project: bayes-whales    Author: kern
def __init__(self, model_def_file, pretrained_model_file, 
                 class_labels_file, gpu_mode):
        logging.info('Loading net and associated files...')
        if gpu_mode:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Classifier(
            model_def_file, pretrained_model_file,
            image_dims=(400, 400), raw_scale=400,
            mean=np.load('{}/mean.npy'.format(REPO_DIRNAME)).mean(1).mean(1), channel_swap=(2, 1, 0)
        )

        with open(class_labels_file) as f:
            labels_df = pd.DataFrame([
                {
                    'synset_id': l.strip().split(' ')[0],
                    'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
                }
                for l in f.readlines()
            ])
        self.labels = labels_df.sort('synset_id')['name'].values
Project: imgpedia    Author: scferrada
def __init__(self, layer="fc7", oversample = True):
        self.caffe_root = "caffe"
        self.model_prototxt = os.path.join(self.caffe_root, 'deploy.prototxt')
        self.model_trained = os.path.join(self.caffe_root, "bvlc_reference_caffenet.caffemodel")
        self.mean_image = os.path.join(self.caffe_root, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
        self.layer = layer
        caffe.set_mode_cpu()
        self.net = caffe.Classifier(self.model_prototxt, self.model_trained,
                           mean=np.load(self.mean_image).mean(1).mean(1),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=(256, 256))
        self.size = (256,256)
        self.patch_size = 224
        self.prefix = "DeCAF7"
        self.oversample = oversample
Project: RealtimeFacialEmotionRecognition    Author: sushant3095
def make_net(mean=None, net_dir='VGG_S_rgb'):
    # net_dir specifies type of network 
    # Options are: (rgb, lbp, cyclic_lbp, cyclic_lbp_5, cyclic_lbp_10)

    caffe_root = '/home/gshine/Data/Caffe'
    sys.path.insert(0, caffe_root + 'python')

    plt.rcParams['figure.figsize'] = (10, 10)
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    net_root = 'models'

    net_pretrained = os.path.join(net_root, net_dir, 'EmotiW_VGG_S.caffemodel')
    net_model_file = os.path.join(net_root, net_dir, 'deploy.prototxt')
    VGG_S_Net = caffe.Classifier(net_model_file, net_pretrained,
                       mean=mean,
                       channel_swap=(2,1,0),
                       raw_scale=255,
                       image_dims=(256, 256))
    return VGG_S_Net

# Load a minibatch of images
Project: FaceAnalysis    Author: ElliotSalisbury
def getNet():
    global TRANSFORMER, NET
    if TRANSFORMER is None or NET is None:
        import caffe

        os.environ['GLOG_minloglevel'] = '2'

        caffe.set_mode_cpu()
        ## Opening mean average image
        proto_data = open(mean_path, "rb").read()
        a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        mean = caffe.io.blobproto_to_array(a)[0]
        ## Loading the CNN
        NET = caffe.Classifier(deploy_path, model_path)
        ## Setting up the right transformer for an input image
        TRANSFORMER = caffe.io.Transformer({'data': NET.blobs['data'].data.shape})
        TRANSFORMER.set_transpose('data', (2, 0, 1))
        TRANSFORMER.set_channel_swap('data', (2, 1, 0))
        TRANSFORMER.set_raw_scale('data', 255.0)
        TRANSFORMER.set_mean('data', mean)
        print('> CNN Model loaded to regress 3D Shape and Texture!')
    return TRANSFORMER, NET
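
Because no preprocessing arguments are passed to caffe.Classifier() here, the returned TRANSFORMER has to be applied manually. A hedged usage sketch (image_path is a placeholder):

transformer, net = getNet()
image = caffe.io.load_image(image_path)                          # H x W x 3 float array in [0, 1]
net.blobs['data'].data[...] = transformer.preprocess('data', image)
output = net.forward()                                           # plain forward pass, no oversampling
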
Project: DeepArt    Author: jiriroz
def __init__(self):
        """Loading DNN model."""
        model_path = '/home/jiri/caffe/models/bvlc_googlenet/'
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_googlenet.caffemodel'
        #model_path = '/home/jiri/caffe/models/oxford102/'
        #net_fn   = model_path + 'deploy.prototxt'
        #param_fn = model_path + 'oxford102.caffemodel'

        # Patching model to be able to compute gradients.
        # Note that you can also manually add "force_backward: true" line
        #to "deploy.prototxt".
        model = caffe.io.caffe_pb2.NetParameter()
        text_format.Merge(open(net_fn).read(), model)
        model.force_backward = True
        open('tmp.prototxt', 'w').write(str(model))

        # ImageNet mean, training set dependent
        mean =  np.float32([104.0, 116.0, 122.0])
        # the reference model has channels in BGR order instead of RGB
        chann_sw = (2,1,0)
        self.net = caffe.Classifier('tmp.prototxt', param_fn, mean=mean, channel_swap=chann_sw)
Project: indus-script-ocr    Author: tpsatish95
def get_predictions(region_crops):
    if os.environ["IS_GPU"]:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    classifier = caffe.Classifier(os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "deploy.prototxt"),
                                  os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "weights.caffemodel"),
                                  mean=np.array([104, 117, 123], dtype='f4'),
                                  image_dims=[224, 224],
                                  raw_scale=255.0,
                                  channel_swap=[2, 1, 0])

    LOGGER.info("Classifying " + str(len(region_crops)) + " inputs.")

    predictions = classifier.predict(region_crops)

    return predictions
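
A hedged usage sketch for the function above; the environment variable values and crop file names are placeholders, and region_crops must be a list of H x W x 3 float images:

os.environ["TEXT_NOTEXT_MODELS_DIR"] = "/path/to/text_notext_models"   # placeholder
os.environ["IS_GPU"] = ""                                              # empty string -> CPU branch
region_crops = [caffe.io.load_image(p) for p in ("crop_0.png", "crop_1.png")]
predictions = get_predictions(region_crops)
best_classes = predictions.argmax(axis=1)                              # most probable class per crop
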
Project: emotion-conv-net    Author: GautamShine
def make_net(mean=None, net_dir='VGG_S_rgb'):
    # net_dir specifies type of network 
    # Options are: (rgb, lbp, cyclic_lbp, cyclic_lbp_5, cyclic_lbp_10)

    caffe_root = '/home/gshine/Data/Caffe'
    sys.path.insert(0, caffe_root + 'python')

    plt.rcParams['figure.figsize'] = (10, 10)
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    net_root = 'models'

    net_pretrained = os.path.join(net_root, net_dir, 'EmotiW_VGG_S.caffemodel')
    net_model_file = os.path.join(net_root, net_dir, 'deploy.prototxt')
    VGG_S_Net = caffe.Classifier(net_model_file, net_pretrained,
                       mean=mean,
                       channel_swap=(2,1,0),
                       raw_scale=255,
                       image_dims=(256, 256))
    return VGG_S_Net

# Load a minibatch of images
Project: ml_idiot    Author: songjun54cm
def get_caffe_model(caffe_dir, caffe_model, gpu=True,
                    image_dims=(256, 256),
                    mean_file='default',
                    raw_scale=255.0,
                    channel_swap=(2,1,0),
                    input_scale=None):
    if mean_file == 'default':
        mean_file = os.path.join(caffe_dir, 'python', 'caffe', 'imagenet', 'ilsvrc_2012_mean.npy')
    model_path = os.path.join(caffe_dir, 'models', caffe_model, '%s.caffemodel'%caffe_model)
    model_def = os.path.join(caffe_dir, 'models', caffe_model, 'deploy.prototxt')

    print('Loading mean file %s' % mean_file)
    mean = np.load(mean_file).mean(1).mean(1)
    if gpu:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    net = caffe.Classifier(model_def, model_path,
                           image_dims=image_dims, mean=mean,
                           input_scale=input_scale, raw_scale=raw_scale,
                           channel_swap=channel_swap)
    return net
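
A hedged usage sketch for the helper above; the Caffe root directory and image file are placeholders:

net = get_caffe_model('/path/to/caffe', 'bvlc_reference_caffenet', gpu=False)
image = caffe.io.load_image('example.jpg')
probs = net.predict([image])             # oversampling (10 crops, averaged) is the default
print(probs[0].argmax())
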
Project: img_classifier_prepare    Author: zonekey
def __init__(self, deploy, pretrained, mean, labels, gpu = False):
        if gpu:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()    # in windows, only CPU mode supported

        self.__labels = self.load_labels(labels);
        mean_ar = self.convert(mean)

        if True:
            self.__net = caffe.Classifier(deploy, pretrained,
                    mean = mean_ar.mean(1).mean(1),
                    channel_swap = (2, 1, 0), 
                    raw_scale = 255,
                    image_dims = (256, 256))
        else: 
            self.__net = caffe.Net(deploy, pretrained, caffe.TEST)
            print self.__net.blobs['data'].data.shape    

            self.__transformer = caffe.io.Transformer({'data': self.__net.blobs['data'].data.shape})
            self.__transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
            self.__transformer.set_mean('data', mean_ar)
            self.__transformer.set_raw_scale('data', 255)
            self.__transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
Project: QScode    Author: PierreHao
def __init__(self):
        caffe.set_mode_gpu()
        #caffe.set_device(0)
        model_path = '../models/bvlc_googlenet/' # substitute your path here
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_googlenet.caffemodel'
        model = caffe.io.caffe_pb2.NetParameter()
        text_format.Merge(open(net_fn).read(), model)
        model.force_backward = True #backward to input layer
        open('tmp.prototxt', 'w').write(str(model))
        self.net = caffe.Classifier('tmp.prototxt', param_fn,
                       mean = np.float32([104.0, 116.0, 122.0]), 
                       channel_swap = (2,1,0))
        # for the mode guide, if flag = 1               
        self.flag = 0
        self.epoch = 20
        self.end = 'inception_4c/output'
        #self.end = 'conv4'
Project: QScode    Author: PierreHao
def __init__(self):
        caffe.set_mode_gpu()
        #caffe.set_device(0)
        model_path = '../models/bvlc_googlenet/' # substitute your path here
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_googlenet.caffemodel'
        model = caffe.io.caffe_pb2.NetParameter()
        text_format.Merge(open(net_fn).read(), model)
        model.force_backward = True #backward to input layer
        open('tmp.prototxt', 'w').write(str(model))
        self.net = caffe.Classifier('tmp.prototxt', param_fn,
                       mean = np.float32([104.0, 116.0, 122.0]), 
                       channel_swap = (2,1,0))
        # for the mode guide, if flag = 1               
        self.flag = 0
        self.epoch = 20
        self.end = 'inception_4c/output'
        #self.end = 'conv4'
Project: image-classifier    Author: gustavkkk
def init_net():
    net = caffe.Classifier(caffe_root  + 'models/bvlc_reference_caffenet/deploy.prototxt',
                           caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    net.set_phase_test()
    net.set_mode_cpu()
    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    net.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'))  # ImageNet mean
    net.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    net.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB
    return net
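
The set_phase_test(), set_mode_cpu(), set_mean(), set_raw_scale() and set_channel_swap() methods used above belong to the old pycaffe wrapper and are no longer available on Classifier objects after the 2015 interface refactor. A hedged sketch of the equivalent construction with the current constructor keywords:

caffe.set_mode_cpu()
# Same model files as above, with preprocessing configured through constructor keywords instead.
net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
                       caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
                       mean=np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1),
                       raw_scale=255,            # the reference model expects [0, 255] inputs
                       channel_swap=(2, 1, 0))   # RGB -> BGR
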
Project: HappyNet    Author: danduncan
def make_net(mean=None, net_dir='VGG_S_rgb'):
    # net_dir specifies a root directory containing a *.caffemodel file
    # Options in our setup are: VGG_S_[rgb / lbp / cyclic_lbp / cyclic_lbp_5 / cyclic_lbp_10]

    # This should hopefully already be in your system path, but just to be sure:
    caffe_root = '/home/Users/Dan/Development/caffe/'
    sys.path.insert(0, caffe_root + 'python')

    # Configure matplotlib
    plt.rcParams['figure.figsize'] = (10, 10)
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    # Generate paths to the various model files
    net_root = 'models'
    net_pretrained = os.path.join(net_root, net_dir, 'EmotiW_VGG_S.caffemodel')
    net_model_file = os.path.join(net_root, net_dir, 'deploy.prototxt')

    # Construct Caffe network object
    VGG_S_Net = caffe.Classifier(net_model_file, net_pretrained,
                       mean=mean,
                       channel_swap=(2,1,0),
                       raw_scale=255,
                       image_dims=(256, 256))
    return VGG_S_Net

# Load a minibatch of images
# Inputs:   List of image filenames, 
#           Color boolean (true if images are in color), 
#           List of labels corresponding to each image, 
#           Index of first image to load
#           Number of images to load
# Output:   List of image numpy arrays of size Num x (W x H x 3)
#           List of labels for just the images in the batch
Project: lighting-augmentation    Author: GemHunt
def get_classifier(model_name, crop_size):
    model_dir = model_name + '/'
    MODEL_FILE = model_dir + 'deploy.prototxt'
    PRETRAINED = model_dir + 'snapshot.caffemodel'
    meanFile = model_dir + 'mean.binaryproto'

    # Open mean.binaryproto file
    blob = caffe.proto.caffe_pb2.BlobProto()
    data = open(meanFile, 'rb').read()
    blob.ParseFromString(data)
    mean_arr = np.array(caffe.io.blobproto_to_array(blob)).reshape(1, crop_size, crop_size)
    print mean_arr.shape

    net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(crop_size, crop_size), mean=mean_arr, raw_scale=255)
    return net
Project: deepdrone    Author: dgorissen
def load_model(model="bvlc_reference_caffenet"):
    # Path to caffe install
    # caffe_root = '~/deep-learning/caffe'
    caffe_root = "~/git/caffe"
    caffe_root = os.path.expanduser(caffe_root)

    # Set the right paths to your model definition file, pretrained model weights
    # and labels file. This example uses the pre-trained ILSVRC12 image classifier
    # CaffeNet model.
    # You can download it by following the installation instructions steps under
    # http://caffe.berkeleyvision.org/model_zoo.htmli
    MODEL_FILE = caffe_root + ('/models/%s/deploy.prototxt' % model)
    PRETRAINED = caffe_root + ('/models/%s/%s.caffemodel' % (model, model))
    LABELS_FILE = caffe_root + '/data/ilsvrc12/synset_words.txt'
    MEAN_FILE = caffe_root + "/python/caffe/imagenet/ilsvrc_2012_mean.npy"

    # load the network via the cafe.Classifier() method
    net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                           mean=np.load(MEAN_FILE).mean(1).mean(1),
                           channel_swap=(2, 1, 0),
                           raw_scale=255,
                           image_dims=(256, 256))

    # get labels from according file
    labels = []
    with open(LABELS_FILE) as f:
        labels = pd.DataFrame([
            {
                'synset_id': l.strip().split(' ')[0],
                'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
            }
            for l in f.readlines()])
        labels = labels.sort_values('synset_id')['name'].values

    return net, labels

# Worker function that takes a frame to be classified from an input queue and
# returns the classification result
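
A hedged usage sketch for load_model() above (the image file is a placeholder); the snippet sorts the label names by synset id and indexes them with the predicted class, so:

net, labels = load_model()
image = caffe.io.load_image('example.jpg')
probs = net.predict([image], oversample=False)[0]
print(labels[probs.argmax()])            # human-readable class name
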
Project: WechatForwardBot    Author: grapeot
def __init__(self, featurefn):
        self.resultNum = 5
        self.net = caffe.Classifier(model_prototxt, model_trained,
            mean=np.load(mean_path).mean(1).mean(1),
            channel_swap=(2,1,0),
            raw_scale=255,
            image_dims=(256, 256))
        self.parseFeature(featurefn)
        logging.info('Caffe net initialized. Loading cache...')
        #self.buildCache()
        self.loadCache('./featureCache.tsv')
        logging.info('Cache built.')
Project: indus-script-ocr    Author: tpsatish95
def get_symbol_classifications(symbols):
    if os.environ["IS_GPU"]:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    classifier = caffe.Classifier(os.path.join(os.environ["JAR_NOJAR_MODELS_DIR"], "deploy.prototxt"),
                                  os.path.join(os.environ["JAR_NOJAR_MODELS_DIR"], "weights.caffemodel"),
                                  image_dims=[64, 64],
                                  raw_scale=255.0)

    LOGGER.info("Classifying " + str(len(symbols)) + " inputs.")

    predictions = classifier.predict([s[1] for s in symbols])

    symbol_sequence = list()
    classes = np.array([0, 1])

    for i, prediction in enumerate(predictions):
        idx = list((-prediction).argsort())
        prediction = classes[np.array(idx)]

        if prediction[0] == 1:
            symbol_sequence.append([symbols[i], "jar"])
        elif prediction[0] == 0:
            symbol_sequence.append([symbols[i], "no-jar"])

    return symbol_sequence
Project: dqn_detection    Author: omgteam
def __init__(self, model, deploy, mean_value=np.asarray([104,  117,  123]), crop_size=227, batch_size=1, feature_blob='pool5',log="../log/cnn.log", Test = False):
        net = caffe.Classifier(deploy, model, caffe.TEST)

        transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
        transformer.set_transpose('data', (2,0,1))
        transformer.set_mean('data', mean_value) # mean pixel
        transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
        transformer.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB
        net.blobs['data'].reshape(batch_size, 3, crop_size, crop_size)
        self.net = net
        self.transformer = transformer
        self.feature_blob = feature_blob
        self.log_file = open(log,"w")
        self.Test = Test
Project: OSDN    Author: abhijitbendale
def compute_features(imgname, args):
    """
    Instantiate a classifier class, pass the images through the network and save features.
    Features are saved in .mat format
    """
    image_dims = [int(s) for s in args.images_dim.split(',')]
    if args.force_grayscale:
      channel_swap = None
      mean_file = None
    else:
      channel_swap = [int(s) for s in args.channel_swap.split(',')]
      mean_file = args.mean_file

    # Make classifier.
    classifier = caffe.Classifier(args.model_def, args.pretrained_model,
            image_dims=image_dims, gpu=args.gpu, mean_file=mean_file,
            input_scale=args.input_scale, channel_swap=channel_swap)

    if args.gpu:
        print 'GPU mode'



    outfname = imgname.replace('imageNetForWeb', 'imageNetForWeb_Features') + ".mat"
    print outfname
    if not path.exists(path.dirname(outfname)):
        os.makedirs(path.dirname(outfname))

    inputs = [caffe.io.load_image(imgname)]

    if args.force_grayscale:
        inputs = [rgb2gray(input) for input in inputs];

    print "Classifying %d inputs." % len(inputs)

    scores = classifier.predict(inputs, not args.center_only)
    # Now save features
    feature_dict = {}
    feature_dict['IMG_NAME'] = path.join(path.dirname(imgname), path.basename(imgname))
    feature_dict['fc7'] = sp.asarray(classifier.blobs['fc7'].data.squeeze(axis=(2,3)))
    feature_dict['fc8'] = sp.asarray(classifier.blobs['fc8'].data.squeeze(axis=(2,3)))
    feature_dict['prob'] = sp.asarray(classifier.blobs['prob'].data.squeeze(axis=(2,3)))
    feature_dict['scores'] = sp.asarray(scores)
    savemat(outfname, feature_dict)
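
The gpu= and mean_file= keyword arguments above appear to come from an older caffe.Classifier signature; in later releases the device is selected globally and the mean is passed as an array. A hedged sketch of the equivalent construction with the same variables as in the function above:

# Sketch of the post-refactor equivalent; assumes mean_file is a .npy mean image or None.
if args.gpu:
    caffe.set_mode_gpu()
else:
    caffe.set_mode_cpu()
mean = np.load(mean_file).mean(1).mean(1) if mean_file else None
classifier = caffe.Classifier(args.model_def, args.pretrained_model,
                              image_dims=image_dims,
                              mean=mean,
                              input_scale=args.input_scale,
                              channel_swap=channel_swap)
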
Project: MLT-Classifier    Author: submagr
def main(argv):
    inputfile = ''
    outputfile = ''

    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print 'caffe_feature_extractor.py -i <inputfile> -o <outputfile>'
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print 'caffe_feature_extractor.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i"):
            inputfile = arg
        elif opt in ("-o"):
            outputfile = arg

    print 'Reading images from "', inputfile
    print 'Writing vectors to "', outputfile

    # Setting this to CPU, but feel free to use GPU if you have CUDA installed
    caffe.set_mode_gpu()
    # Loading the Caffe model, setting preprocessing parameters
    net = caffe.Classifier(model_prototxt, model_trained,
                           mean=np.load(mean_path).mean(1).mean(1),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=(256, 256))

    # Loading class labels
    with open(imagenet_labels) as f:
        labels = f.readlines()

    # This prints information about the network layers (names and sizes)
    # You can uncomment this, to have a look inside the network and choose which layer to print
    #print [(k, v.data.shape) for k, v in net.blobs.items()]
    #exit()

    # Processing one image at a time, printing predictions and writing the vector to a file
    with open(inputfile, 'r') as reader:
        with open(outputfile, 'w') as writer:
            writer.truncate()
            for image_path in reader:
                image_path = image_path.strip()
                input_image = caffe.io.load_image(image_path)
                prediction = net.predict([input_image], oversample=False)
                print os.path.basename(image_path), ' : ' , labels[prediction[0].argmax()].strip() , ' (', prediction[0][prediction[0].argmax()] , ')'
                np.savetxt(writer, net.blobs[layer_name].data[0].reshape(1,-1), fmt='%.8g')
Project: WechatForwardBot    Author: grapeot
def main(argv):
    inputfile = ''
    outputfile = ''

    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print('caffe_feature_extractor.py -i <inputfile> -o <outputfile>')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print('caffe_feature_extractor.py -i <inputfile> -o <outputfile>')
            sys.exit()
        elif opt in ("-i"):
            inputfile = arg
        elif opt in ("-o"):
            outputfile = arg

    print('Reading images from "', inputfile)
    print('Writing vectors to "', outputfile)

    # Setting this to CPU, but feel free to use GPU if you have CUDA installed
    caffe.set_mode_cpu()
    # Loading the Caffe model, setting preprocessing parameters
    net = caffe.Classifier(model_prototxt, model_trained,
                           mean=np.load(mean_path).mean(1).mean(1),
                           channel_swap=(2,1,0),
                           raw_scale=255,
                           image_dims=(256, 256))

    # Loading class labels
    with open(imagenet_labels) as f:
        labels = f.readlines()

    # This prints information about the network layers (names and sizes)
    # You can uncomment this, to have a look inside the network and choose which layer to print
    #print [(k, v.data.shape) for k, v in net.blobs.items()]
    #exit()

    # Processing one image at a time, printing predictions and writing the vector to a file
    with open(inputfile, 'r') as reader:
        with open(outputfile, 'w') as writer:
            writer.truncate()
            for image_path in reader:
                try:
                    image_path = image_path.strip()
                    with open(image_path, 'rb') as fp:
                        cachekey = hashlib.sha224(fp.read()).hexdigest()
                    input_image = caffe.io.load_image(image_path)
                    prediction = net.predict([input_image], oversample=False)
                    print(os.path.basename(image_path), ' : ' , labels[prediction[0].argmax()].strip() , ' (', prediction[0][prediction[0].argmax()] , ')')
                    feature = net.blobs[layer_name].data[0].reshape(1,-1)
                    featureTxt = ' '.join([ str(x) for x in feature.tolist()[0] ])
                    writer.write('{0}\t{1}\t{2}\n'.format(image_path, cachekey, featureTxt))
                except Exception as e:
                    print(e)
                    print('ERROR: skip {0}.'.format(image_path))
Project: DeepVis-PredDiff    Author: lmzintgraf
def get_caffenet(netname):

    if netname=='googlenet':

        # caffemodel paths
        model_path = './Caffe_Models/googlenet/'
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_googlenet.caffemodel'

        # get the mean (googlenet doesn't do this per feature, but per channel, see train_val.prototxt)
        mean = np.float32([104.0, 117.0, 123.0]) 

        # define the neural network classifier
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)

    elif netname=='alexnet':

        # caffemodel paths
        model_path = './Caffe_Models/bvlc_alexnet/'
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_alexnet.caffemodel'

        # get the mean
        mean = np.load('./Caffe_Models/ilsvrc_2012_mean.npy')
        # crop mean
        image_dims = (227,227) # see deploy.prototxt file
        excess_h = mean.shape[1] - image_dims[0]
        excess_w = mean.shape[2] - image_dims[1]
        mean = mean[:, excess_h:(excess_h+image_dims[0]), excess_w:(excess_w+image_dims[1])]

        # define the neural network classifier
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)

    elif netname == 'vgg':

        # caffemodel paths
        model_path = './Caffe_Models/vgg network/'
        net_fn   = model_path + 'VGG_ILSVRC_16_layers_deploy.prototxt'
        param_fn = model_path + 'VGG_ILSVRC_16_layers.caffemodel'

        mean = np.float32([103.939, 116.779, 123.68])    

        # define the neural network classifier    
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)

    else:

        print 'Provided netname unknown. Returning None.'
        net = None

    return net
Project: fine-tuning    Author: junyuseu
def main():
    """ Read a image of flowers and predict which kind of flowers it is. """
    parse=argparse.ArgumentParser()
    parse.add_argument(
        "input_file",
        help="Image file you want to predict"
    )
    parse.add_argument(
        "model",
        help="network structure"
    )
    parse.add_argument(
        "weights",
        help="pretrained model"
    )
    parse.add_argument(
        "mean_file",
        help="mena file"
    )
    parse.add_argument(
        "mean_size",
        type=int,
        help="test crop size of the original image.eg for CaffeNet is 227 and for VGGNet is 224"
    )
    args=parse.parse_args()
    image=caffe.io.load_image(args.input_file)
    if args.mean_size==224:
        imagenet_mean = np.load(args.mean_file)[:, 16:16 + 224, 16:16 + 224]
    elif args.mean_size==227:
        imagenet_mean = np.load(args.mean_file)[:, 14:14 + 227, 14:14 + 227]
    net=caffe.Classifier(
        args.model,args.weights,
        mean=imagenet_mean,# subtract the dataset-mean value in each channel
        channel_swap=(2,1,0),# swap channels from RGB to BGR
        raw_scale=255,# rescale from [0, 1] to [0, 255]
        image_dims=(256,256)
    )
    result=net.predict([image])
    label=np.argmax(result)
    plt.imshow(image)
    plt.axis('off')
    plt.title('{}:{:.3f}'.format(labels[label],result[0][label]))
    plt.savefig('{}.png'.format(labels[label]))
    plt.show()