Python dlib module: shape_predictor() example source code

We extracted the following 23 code examples from open-source Python projects to illustrate how to use dlib.shape_predictor().

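Before the project excerpts, here is a minimal self-contained usage sketch. The image path and the model path are assumptions; shape_predictor_68_face_landmarks.dat has to be downloaded separately from dlib's published model files.

import dlib
import cv2

PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"  # assumed local model path

detector = dlib.get_frontal_face_detector()       # HOG-based frontal face detector
predictor = dlib.shape_predictor(PREDICTOR_PATH)  # 68-point landmark predictor

img = cv2.imread("face.jpg")                      # assumed input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# The second argument is the number of times to upsample before detecting.
for rect in detector(gray, 1):
    shape = predictor(gray, rect)                 # full_object_detection with 68 parts
    points = [(p.x, p.y) for p in shape.parts()]
    print("found {} landmarks".format(len(points)))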
Project: tdesc    Author: bkj
def __init__(self, num_jitters=10, dnn=False, det_threshold=0.0, upsample=0):
        import_dlib()

        ppath = os.path.join(os.environ['HOME'], '.tdesc')

        if not dnn:
            self.detector = dlib.get_frontal_face_detector()
        else:
            detpath = os.path.join(ppath, 'models/dlib/mmod_human_face_detector.dat')
            self.detector = dlib.cnn_face_detection_model_v1(detpath)

        shapepath = os.path.join(ppath, 'models/dlib/shape_predictor_68_face_landmarks.dat')
        self.sp = dlib.shape_predictor(shapepath)

        facepath = os.path.join(ppath, 'models/dlib/dlib_face_recognition_resnet_model_v1.dat')
        self.facerec = dlib.face_recognition_model_v1(facepath)

        self.num_jitters = num_jitters
        self.dnn = dnn
        self.det_threshold = det_threshold
        self.upsample = upsample

        print('DlibFaceWorker: ready (dnn=%d | num_jitters=%d)' % (int(dnn), int(num_jitters)), file=sys.stderr)
Project: EyesInTheSky    Author: SherineSameh
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark detector
        :type facePredictor: str
        :param OPENCV_Detector: The path to OpenCV's Haar cascade classifier
        :type OPENCV_Detector: str
        :param HOG_Detector: The path to dlib's HOG face detection model
        :type HOG_Detector: str
        """
        assert facePredictor is not None

        self.OPENCV_Detector =  cv2.CascadeClassifier("/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_default.xml")
        self.HOG_Detector    = dlib.get_frontal_face_detector()
        self.predictor       = dlib.shape_predictor(facePredictor)
Project: FaceSwap    Author: Aravind-Suresh
def get_facial_landmarks(img):
    # No need to upsample
    rects = face_detector(img, 0)

    if len(rects) == 0:
        print "No faces"
        return None

    rect = rects[0]
    shape = shape_predictor(img, rect)
    return np.matrix([[pt.x, pt.y] for pt in shape.parts()]), rect
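This excerpt uses module-level face_detector and shape_predictor objects created elsewhere in the project; a plausible setup, with the model path as an assumption, would be:

import dlib

PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"  # assumed model location

face_detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor(PREDICTOR_PATH)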
Project: facerecognition    Author: guoxiaolu
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: facerecognition    Author: guoxiaolu
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def load_trained_models():
    """
        Helper function to load DLIB's models.
    """
    if not os.path.isfile("data/dlib_data/shape_predictor_68_face_landmarks.dat"):
        return
    global FACE_DETECTOR_MODEL, LANDMARKS_PREDICTOR

    FACE_DETECTOR_MODEL = dlib.get_frontal_face_detector()
    LANDMARKS_PREDICTOR = dlib.shape_predictor("data/dlib_data/shape_predictor_68_face_landmarks.dat")
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def load_trained_models():
    if not os.path.isfile("data/dlib_data/shape_predictor_68_face_landmarks.dat"):
        return
    global FACE_DETECTOR_MODEL, LANDMARKS_PREDICTOR

    FACE_DETECTOR_MODEL = dlib.get_frontal_face_detector()
    LANDMARKS_PREDICTOR = dlib.shape_predictor("data/dlib_data/shape_predictor_68_face_landmarks.dat")
Project: FaceSwapper    Author: QuantumLiu
def __init__(self,heads_list=[],predictor_path="./data/shape_predictor_68_face_landmarks.dat"):
        '''
        heads_list:
            List of head (source face) images to load; each entry is registered
            as a candidate face for swapping.
        predictor_path:
            Path to dlib's 68-point facial landmark predictor model file.
        '''
        # Predictor path and landmark index groups for the 68-point model
        self.PREDICTOR_PATH = predictor_path
        self.FACE_POINTS = list(range(17, 68))
        self.MOUTH_POINTS = list(range(48, 61))
        self.RIGHT_BROW_POINTS = list(range(17, 22))
        self.LEFT_BROW_POINTS = list(range(22, 27))
        self.RIGHT_EYE_POINTS = list(range(36, 42))
        self.LEFT_EYE_POINTS = list(range(42, 48))
        self.NOSE_POINTS = list(range(27, 35))
        self.JAW_POINTS = list(range(0, 17))

        # Points used to align the two faces
        self.ALIGN_POINTS = (self.LEFT_BROW_POINTS + self.RIGHT_EYE_POINTS + self.LEFT_EYE_POINTS +
                                       self.RIGHT_BROW_POINTS + self.NOSE_POINTS + self.MOUTH_POINTS)

        # Points from the source face to overlay on the target; the convex hull of each group is used as a mask
        self.OVERLAY_POINTS = [self.LEFT_EYE_POINTS + self.RIGHT_EYE_POINTS + self.LEFT_BROW_POINTS + self.RIGHT_BROW_POINTS,
            self.NOSE_POINTS + self.MOUTH_POINTS]

        # Amount of blur to use during colour correction, as a fraction of the pupillary distance
        self.COLOUR_CORRECT_BLUR_FRAC = 0.6

        # dlib face detector and landmark predictor
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)

        # Cache of loaded head images
        self.heads={}
        if heads_list:
            self.load_heads(heads_list)
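The swapping methods of this class are not part of the excerpt. As a hedged illustration of how the detector and predictor are typically combined (in the style of the faceswap script this code appears to follow), a landmark-extraction method could look like this, assuming numpy is imported as np:

    def get_landmarks(self, im):
        # Hypothetical helper for illustration only: detect the first face
        # and return its 68 landmarks as a (68, 2) matrix.
        rects = self.detector(im, 1)
        if len(rects) == 0:
            return None
        return np.matrix([[p.x, p.y] for p in self.predictor(im, rects[0]).parts()])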
Project: tensorflow_face    Author: ZhihengCV
def __init__(self, facePredictor = None):
        """Initialize the dlib-based alignment."""
        self.detector = dlib.get_frontal_face_detector()
        if facePredictor is not None:
            self.predictor = dlib.shape_predictor(facePredictor)
        else:
            self.predictor = None
Project: faceNet_RealTime    Author: jack55436001
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: apparent-age-gender-classification    Author: danielyou0230
def debug_face_landmark(file, output=False, output_name='output'):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_face_landmark)

    image = cv2.imread(file)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape

    faces = detector(gray, 1)
    for (i, itr_face) in enumerate(faces):
        shape = predictor(gray, itr_face)
        shape = shape_to_np(shape)
        # convert dlib's rectangle to a OpenCV-style bounding box
        # [i.e., (x, y, w, h)], then draw the face bounding box
        (x, y, w, h) = rect_to_bb(itr_face, img_size, file)
        #print "landmark: ({:d}, {:d}) ({:d}, {:d})".format(x, y, w, h)

        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # show the face number
        cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    # show the output image with the face detections + facial landmarks
    cv2.imshow(file, image)
    cv2.waitKey(0)
    if output:
        cv2.imwrite("../" + str(output_name + 1) + '.jpg', image)
    cv2.destroyAllWindows()
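Here dat_face_landmark is a module-level path to the landmark model, and shape_to_np and rect_to_bb are project helpers not shown in the excerpt. A minimal sketch of shape_to_np, which converts the predictor's full_object_detection into a NumPy array (the project's own version may differ):

import numpy as np

def shape_to_np(shape, dtype="int"):
    # Copy the 68 dlib landmark points into a (68, 2) NumPy array.
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords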
Project: FacePoseEstimation    Author: abhisharma7
def __init__(self,option_type,path):

        self.face_cascade = cv2.CascadeClassifier("cascade/haarcascade_frontalface_default.xml")
        self.eye_cascade = cv2.CascadeClassifier("cascade/haarcascade_eye.xml")
        self.smile_cascade = cv2.CascadeClassifier("cascade/haarcascade_smile.xml")
        self.shape_predictor = "cascade/shape_predictor_68_face_landmarks.dat"
        self.facedetect = False
        self.functioncall = option_type
        self.sourcepath = path
        self.image_path = None
        self.video_path = None
        self.webcam_path = None
        self.main_function()
Project: FacePoseEstimation    Author: abhisharma7
def dlib_function(self,image):

        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(self.shape_predictor)
        image = imutils.resize(image, width=500)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = detector(image, 1)

        for (i, rect) in enumerate(rects):
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            for (x, y) in shape:
                cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
        return image
Project: icyface_api    Author: bupticybee
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: photo-a-day-aligner    Author: matthewearl
def __init__(self, predictor_path):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(str(predictor_path))
Project: OpenCV2    Author: SarathM1
def __init__(self):
        self.PREDICTOR_PATH = "../shape_predictor_68_face_landmarks.dat"
        self.MOUTH_POINTS = [list(range(48,  61))]

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)
Project: OpenCV2    Author: SarathM1
def __init__(self):
        self.PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
        MOUTH_POINTS = list(range(48, 61))
        self.OVERLAY_POINTS = [MOUTH_POINTS]

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)
Project: facenet    Author: davidsandberg
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: DeepLearningSandbox    Author: DeepLearningSandbox
def encode(detector, shape_predictor, model, image, win=None):
  """Encodes faces from a single image into a 128 dim descriptor.

  Args:
    detector: dlib face detector object
    shape_predictor: dlib shape predictor object
    model: dlib convnet model
    image: image as numpy array
    win: dlib window object for visualization if VIZ flag == 1

  Returns:
    list of descriptors (np array) for each face detected in image
  """
  # dlib comments:
  # Ask the detector to find the bounding boxes of each face. The 1 in the
  # second argument indicates that we should upsample the image 1 time. This
  # will make everything bigger and allow us to detect more faces.
  dets = detector(image, 1)
  print("Number of faces detected: {}".format(len(dets)))

  descriptors = []
  for k, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        k, d.left(), d.top(), d.right(), d.bottom()))
    # Get the landmarks/parts for the face in box d.
    shape = shape_predictor(image, d)
    # Draw the face landmarks on the screen so we can see what face is currently being processed.

    if win is not None:
      win.clear_overlay()
      win.set_image(image)
      win.add_overlay(d)
      win.add_overlay(shape)
      dlib.hit_enter_to_continue()

    # Compute the 128D vector that describes the face in the image identified by shape
    face_descriptor = model.compute_face_descriptor(image, shape)
    descriptors.append(np.asarray(list(face_descriptor)))

  return descriptors
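A hedged usage sketch for encode: the model file names below follow dlib's published pretrained models, while the image path and the use of dlib.load_rgb_image (available in recent dlib versions) are assumptions.

import dlib

detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
model = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

image = dlib.load_rgb_image("face.jpg")  # assumed input image
descriptors = encode(detector, shape_predictor, model, image)
print("{} face descriptor(s), each of length 128".format(len(descriptors)))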
Project: real-time-face-recognition    Author: iwantooxxoox
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark predictor
        :type facePredictor: str
        """
        assert facePredictor is not None

        #pylint: disable=no-member
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(facePredictor)
Project: face-identification-tpe    Author: meownoid
def __init__(self,
                 dlib_predictor_path,
                 face_template_path):
        self.predictor = dlib.shape_predictor(dlib_predictor_path)
        self.face_template = np.load(face_template_path)
Project: apparent-age-gender-classification    Author: danielyou0230
def face_extraction(path):
    path_str = path[:-1] if path.endswith('/') else path
    output_dir = path_str + '_faces'
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dat_face_landmark)
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    undetectLst = list()

    numfile = get_dataInfo(path_str)
    not_detected = 0
    itr = 0

    for itr_file in os.listdir(path_str):
        if itr_file.endswith('.jpg'):
            file = "{:s}/{:s}".format(path_str, itr_file)
            image = cv2.imread(file)
            image = imutils.resize(image, width=500)
            bFace, faces = facial_landmark_detection(image, detector, predictor, file)

            if not bFace:
                bFace, faces = face_detect_classifier(image, face_cascade)
                if not bFace:
                    print(file)
                    undetectLst.append(file)
                    not_detected += 1
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    cv2.imwrite("{:s}/{:s}".format(output_dir, itr_file), image)
                    continue
            x, y, w, h = faces
            crop_img = image[y:y + h, x:x + w]
            crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
            cv2.imwrite("{:s}/{:s}".format(output_dir, itr_file), crop_img)
            itr += 1 
        else:
            continue
    total = itr + not_detected
    print "{:s}: {:4d} of {:4d} file missed detected, detect rate {:2.2f}%"\
    .format(path_str, not_detected, total, 100.0 * itr / total)
    return undetectLst, total
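face_extraction depends on several project-level names that are not shown (dat_face_landmark, xml_face_classifier, get_dataInfo, facial_landmark_detection, face_detect_classifier). As a rough, hypothetical sketch, facial_landmark_detection presumably runs the dlib detector and predictor and reports the first face's bounding box:

import cv2

def facial_landmark_detection(image, detector, predictor, fname):
    # Hypothetical re-implementation for illustration; the project's helper may differ.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    if len(rects) == 0:
        return False, None
    rect = rects[0]
    predictor(gray, rect)  # landmarks are computed, but only the box is returned here
    return True, (rect.left(), rect.top(), rect.width(), rect.height())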
Project: amoc-project    Author: ajayns
def main_func():
    img_path='snap.jpg' # THE PATH OF THE IMAGE TO BE ANALYZED

    font=cv2.FONT_HERSHEY_DUPLEX
    emotions = ["anger", "happy", "sadness"] #Emotion list
    clahe=cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8)) # Histogram equalization object
    face_det=dlib.get_frontal_face_detector()
    land_pred=dlib.shape_predictor("data/DlibPredictor/shape_predictor_68_face_landmarks.dat")



    SUPPORT_VECTOR_MACHINE_clf2 = joblib.load('data/Trained_ML_Models/SVM_emo_model_7.pkl')
    # Loading the SVM model trained earlier in the path mentioned above.



    pred_data=[]
    pred_labels=[]

    a=crop_face(img_path)
    img=cv2.imread(a)
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    clahe_gray=clahe.apply(gray)
    landmarks_vec = get_landmarks(clahe_gray,face_det,land_pred)

    #print(len(landmarks_vec))
    #print(landmarks_vec)

    if landmarks_vec == "error":
        pass
    else:
        pred_data.append(landmarks_vec)
    np_test_data = np.array(pred_data)
    a=SUPPORT_VECTOR_MACHINE_clf2.predict(pred_data)
    #cv2.putText(img,'DETECTED FACIAL EXPRESSION : ',(8,30),font,0.7,(0,0,255),2,cv2.LINE_AA)
    #l=len('Facial Expression Detected : ')
    #cv2.putText(img,emotions[a[0]].upper(),(150,60),font,1,(255,0,0),2,cv2.LINE_AA)
    #cv2.imshow('test_image',img)
    #print(emotions[a[0]])


    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return emotions[a[0]]
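crop_face and get_landmarks are project helpers that are not included in this excerpt. A hypothetical sketch of get_landmarks, which would flatten the 68 predicted points into a feature vector for the SVM and return "error" when no face is found:

def get_landmarks(gray_img, detector, predictor):
    # Hypothetical re-implementation for illustration; the project's version
    # likely normalizes the coordinates before feeding them to the classifier.
    rects = detector(gray_img, 1)
    if len(rects) == 0:
        return "error"
    shape = predictor(gray_img, rects[0])
    coords = []
    for i in range(shape.num_parts):
        coords.extend([shape.part(i).x, shape.part(i).y])
    return coords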