Python cv2 module: CascadeClassifier() examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.CascadeClassifier().
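Every example below follows the same basic pattern: load a cascade XML file, convert the input image to grayscale, and call detectMultiScale(). A minimal, self-contained sketch of that pattern is given first; the file names are placeholders you should replace with a real photo and with a cascade XML shipped under OpenCV's data/haarcascades directory.

import cv2

CASCADE_PATH = "haarcascade_frontalface_default.xml"  # placeholder: path to a cascade XML
IMAGE_PATH = "example.jpg"                            # placeholder: path to any photo

face_cascade = cv2.CascadeClassifier(CASCADE_PATH)
if face_cascade.empty():
    raise IOError("Could not load cascade file: " + CASCADE_PATH)

img = cv2.imread(IMAGE_PATH)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# detectMultiScale returns one (x, y, w, h) rectangle per detected face
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)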

Project: Easitter    Author: TomoyaFujita2016    | Project source | File source
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    result = image.copy()
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1,1))

    if len(faceRect) <= 0:
        return False
    else:
        # confirm face
        imageSize = image.shape[0] * image.shape[1]
        #print("d1")
        filteredFaceRects = []
        for faceR in faceRect:
            faceSize = faceR[2]*faceR[3]
            if FACE_SHAPE > float(min(faceR[2], faceR[3])) / max(faceR[2], faceR[3]):
                continue  # skip detections that are too far from square to be a face
            filteredFaceRects.append(faceR)

        return len(filteredFaceRects) > 0
Project: structured-output-ae    Author: sbelharbi    | Project source | File source
def face_detect(self, img):
        """ Detect the face location in the image img, using OpenCV's Haar cascade face detector.

        return : x, y, w, h of the bounding box.
        """
        face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_default.xml')
        faces = face_cascade.detectMultiScale(img, 1.3, 5)
        x = -1
        y = -1
        w = -1
        h = -1
        if len(faces) == 1: # accept only a single detected face; otherwise return nothing
            x,y,w,h = faces[0]
        else:
##            for (x_,y_,w_,h_) in faces:
##                x = x_
##                y = y_
##                w = w_
##                h = h_
##                break # we take only the first face,
            print "More than one face!!!!!!!!!"


        return x,y,w,h
Project: face_ar    Author: pseelinger    | Project source | File source
def index():
    img_array = []
    label_array = []
    # cv2.CascadeClassifier() needs a local file; download the cascade XML from
    # https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_alt.xml first
    face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    recognizer = cv2.createLBPHFaceRecognizer()
    for row in db(db.faces.id > 0).select():
        rtn = row
        path=os.path.join(request.folder, 'uploads', rtn.file)
#         image = response.download(open(path, 'rb'), chunk_size=4096)
        img = cv2.imread(path, 0)
        img_array.append(img)
#         faces = face_cascade.detectMultiScale(img, 1.3, 5)
#         for (x,y,w,h) in faces:
#             img_array.append(img[y: y + h, x: x + w])
        label_array.append(rtn.user_id)
    recognizer.train(img_array, np.array(label_array))
    recognizer.save(os.path.join(request.folder, 'private', "trained_recognizer.xml"))
    return response.download("trained_recognizer.xml")
Project: DeepFryBot    Author: asdvek    | Project source | File source
def find_eyes(img):
    # print("Searching for eyes...")
    coords = []
    face_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_eye.xml')
    gray = np.array(img.convert("L"))

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            # print("\tFound eye at ({0}, {1})".format(x+ex+ew/2, y+ey+eh/2))
            coords.append((x+ex+ew/2, y+ey+eh/2))
    if len(coords) == 0:
        # print("\tNo eyes found.")
        pass
    return coords
Project: Emotion-Recognition    Author: HashCode55    | Project source | File source
def process_image(img=None):
    """
    Extract faces from the image using a Haar cascade, then resize and filter them.
    :param img: image matrix; must be grayscale
    :returns faces: list containing the cropped face images
    """
    face_cascade = cv2.CascadeClassifier('/Users/mehul/opencv-3.0.0/build/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')   

    faces_location = face_cascade.detectMultiScale(img, 1.3, 5)
    faces = []

    for (x,y,w,h) in faces_location:
        img = img[y:(y+h), x:(x+w)]
        try:
            img = cv2.resize(img, (256, 256))
        except:
            exit(1)
        img = cv2.bilateralFilter(img,15,10,10)
        img = cv2.fastNlMeansDenoising(img,None,4,7,21)
        faces.append(img)

    return faces
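As the docstring notes, the input must already be grayscale; a short hypothetical usage sketch (the file name is a placeholder):

# Hypothetical usage: load the image as grayscale before calling process_image
img = cv2.imread('portrait.jpg', cv2.IMREAD_GRAYSCALE)
faces = process_image(img)
print('Extracted {} face(s)'.format(len(faces)))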
Project: StreamMotionDetection    Author: henry54809    | Project source | File source
def __init__(self, scale=1.08):
         script_path = common.get_script_path()
         self.cascade = cv2.CascadeClassifier(script_path + "/haarcascade_frontalface_alt.xml")
         self.cascade_profile = cv2.CascadeClassifier(script_path + '/haarcascade_profileface.xml')
         self.scale = scale
         self.hog = cv2.HOGDescriptor()
         self.hog.load(script_path + '/hard_negative_svm/hog.xml')
         self.svm = cv2.ml.SVM_load(script_path + '/hard_negative_svm/output_frontal.xml')
         self.svm_profile = cv2.ml.SVM_load(script_path + '/hard_negative_svm/output_profile.xml')
Project: faceNet_RealTime    Author: jack55436001    | Project source | File source
def main(args):

    saveFace = None
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces) > 0:
            saveFace = frame
            break
        # Display the resulting frame
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg',frame)

    mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    myImage = []
    for file in onlyfiles:
        isImage = None
        file = mypath + '/' + file
        isImage = imghdr.what(file)
        if isImage != None:
            myImage.append(file)

    #begin facenet
    cp.main(args, myImage)
Project: Automatic_Group_Photography_Enhancement    Author: Yuliang-Zou    | Project source | File source
def getFaceData(img):
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Read the image
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
     )
    for (x, y, w, h) in faces:
        facedata = image[y:y+h, x:x+w]
    return facedata
Project: python-smart-crop    Author: epixelic    | Project source | File source
def center_from_faces(matrix):
    face_cascade = cv2.CascadeClassifier(cascade_path)
    faces = face_cascade.detectMultiScale(matrix, FACE_DETECT_REJECT_LEVELS, FACE_DETECT_LEVEL_WEIGHTS)

    center_x, center_y = (0.0, 0.0)
    weight = 0

    # iterate over our faces array, accumulating an area-weighted centre
    # (separate accumulator names so the loop variables x, y do not overwrite the running totals)
    for (x, y, w, h) in faces:
        print('Face detected at ', x, y, w, h)
        weight += w * h
        center_x += (x + w / 2) * w * h
        center_y += (y + h / 2) * w * h

    if len(faces) == 0:
        return False

    return {
        'x': center_x / weight,
        'y': center_y / weight,
        'count': len(faces)
    }
Project: PyHack    Author: lanxia    | Project source | File source
def faceDetect(path, fileName):
    img = cv2.imread(path)
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))

    if len(rects) == 0:
        return False

    rects[:, 2:] += rects[:, :2]

    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)

    cv2.imwrite("%s/%s-%s" % (facesDirectory, pcapFile, fileName), img)

    return True
Project: apparent-age-gender-classification    Author: danielyou0230    | Project source | File source
def debug_face_classifier(file):
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    image = cv2.imread(file)

    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.07, 3)
    print faces
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #roi_gray = gray[y:y+h, x:x+w]
        #roi_color = image[y:y+h, x:x+w]

    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: tbotnav    Author: patilnabhi    | Project source | File source
def __init__(self):
        self.node_name = "face_recog_fisher"
        rospy.init_node(self.node_name)

        rospy.on_shutdown(self.cleanup)
        self.bridge = CvBridge()
        self.face_names = StringArray()
        self.all_names = StringArray()

        self.size = 4
        face_haar = 'haarcascade_frontalface_default.xml'
        self.haar_cascade = cv2.CascadeClassifier(face_haar)
        self.face_dir = 'face_data_fisher'
        self.model = cv2.createFisherFaceRecognizer()
        # self.model = cv2.createEigenFaceRecognizer()

        (self.im_width, self.im_height) = (112, 92)        

        rospy.loginfo("Loading data...")
        # self.fisher_train_data()
        self.load_trained_data()
        rospy.sleep(3)        

        # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback)
        self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)

        # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10)
        self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10)
        self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10)
        rospy.loginfo("Detecting faces...")
Project: tbotnav    Author: patilnabhi    | Project source | File source
def __init__(self):
        self.node_name = "train_faces_eigen"
        rospy.init_node(self.node_name)

        rospy.on_shutdown(self.cleanup)
        self.bridge = CvBridge()

        self.size = 4
        face_haar = 'haarcascade_frontalface_default.xml'
        self.haar_cascade = cv2.CascadeClassifier(face_haar)
        self.face_dir = 'face_data_eigen'
        self.face_name = sys.argv[1]
        self.path = os.path.join(self.face_dir, self.face_name)
        # self.model = cv2.createFisherFaceRecognizer()
        self.model = cv2.createEigenFaceRecognizer()

        self.cp_rate = 5

        if not os.path.isdir(self.path):
            os.mkdir(self.path)

        self.count = 0    

        self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
        # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10)
        rospy.loginfo("Capturing data...")
Project: tbotnav    Author: patilnabhi    | Project source | File source
def __init__(self):
        self.node_name = "train_faces_fisher"
        rospy.init_node(self.node_name)

        rospy.on_shutdown(self.cleanup)
        self.bridge = CvBridge()

        self.size = 4
        face_haar = 'haarcascade_frontalface_default.xml'
        self.haar_cascade = cv2.CascadeClassifier(face_haar)
        self.face_dir = 'face_data_fisher'
        self.face_name = sys.argv[1]
        self.path = os.path.join(self.face_dir, self.face_name)
        self.model = cv2.createFisherFaceRecognizer()
        # self.model = cv2.createEigenFaceRecognizer()

        self.cp_rate = 5

        if not os.path.isdir(self.path):
            os.mkdir(self.path)

        self.count = 0    

        self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)
        # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10)
        rospy.loginfo("Capturing data...")
Project: tbotnav    Author: patilnabhi    | Project source | File source
def __init__(self):
        self.node_name = "face_recog_eigen"
        rospy.init_node(self.node_name)

        rospy.on_shutdown(self.cleanup)
        self.bridge = CvBridge()
        self.face_names = StringArray()

        self.size = 4
        face_haar = 'haarcascade_frontalface_default.xml'
        self.haar_cascade = cv2.CascadeClassifier(face_haar)
        self.face_dir = 'face_data_eigen'
        # self.model = cv2.createFisherFaceRecognizer()
        self.model = cv2.createEigenFaceRecognizer()

        (self.im_width, self.im_height) = (112, 92)        

        rospy.loginfo("Loading data...")
        # self.fisher_train_data()
        self.load_trained_data()
        rospy.sleep(3)        

        # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback)
        self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback)

        # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10)
        self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10)
        self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10)
        rospy.loginfo("Detecting faces...")
Project: HappyNet    Author: danduncan    | Project source | File source
def load_cascades():
    # Load Haar cascade files containing features
    cascPaths = ['models/haarcascades/haarcascade_frontalface_default.xml',
                 'models/haarcascades/haarcascade_frontalface_alt.xml',
                 'models/haarcascades/haarcascade_frontalface_alt2.xml',
                 'models/haarcascades/haarcascade_frontalface_alt_tree.xml',
                 'models/lbpcascades/lbpcascade_frontalface.xml']
    faceCascades = []
    for casc in cascPaths:
        faceCascades.append(cv.CascadeClassifier(casc))

    return faceCascades

# Do Haar cascade face detection on a single image
# Face detection returns a list of faces
# Where each face is the coordinates of a rectangle containing a face:
#   (x,y,w,h)
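The detection function described by the comment above is not part of this excerpt; a minimal sketch consistent with it, reusing the cascades returned by load_cascades() (the function name and the fall-through over the cascades are assumptions):

def detect_faces(gray_img, faceCascades):
    # Try each loaded cascade in turn and return the first non-empty result
    # as a list of (x, y, w, h) rectangles.
    for cascade in faceCascades:
        faces = cascade.detectMultiScale(gray_img, scaleFactor=1.1,
                                         minNeighbors=5, minSize=(30, 30))
        if len(faces) > 0:
            return list(faces)
    return []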
Project: Face-recognition-test    Author: jiangwei1995910    | Project source | File source
def getFaceArray(img):
    # haarcascade_frontalface_default.xml is OpenCV's pre-trained frontal-face Haar cascade
    face_cascade=cv2.CascadeClassifier("/home/jiangwei/??/faceRead/haarcascade_frontalface_default.xml")
    if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
            gray = img  # img is already a 2-D grayscale image when it has fewer than 3 dimensions

    faces = face_cascade.detectMultiScale(gray, 1.2, 5)  # 1.2 is the scale factor, 5 is minNeighbors
    result = []
    for (x,y,width,height) in faces:
            result.append((x,y,x+width,y+height))

    return result
    # if(len(result)>0):
    #     # for r in result:
    #         # img2=cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
    #         # img3=img[r[1]:r[3], r[0]:r[2]]  # ?????????????
    #
    #     return result
    #
    # return []

Project: Face-recognition-test    Author: jiangwei1995910    | Project source | File source
def getFaceImg(img):
    face_cascade=cv2.CascadeClassifier("/home/jiangwei/??/faceRead/haarcascade_frontalface_default.xml")
    if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
            gray = img  # img is already a 2-D grayscale image when it has fewer than 3 dimensions

    faces = face_cascade.detectMultiScale(gray, 1.2, 5)  # 1.2 is the scale factor, 5 is minNeighbors
    result = []
    for (x,y,width,height) in faces:
            result.append((x,y,x+width,y+height))

    print result
    if(len(result)>0):
        for r in result:
            img2=cv2.rectangle(img, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3=img[r[1]:r[3], r[0]:r[2]]  # crop the face region from the original image


        return [img3,img2]

    return []

Project: CodeLabs    Author: TheIoTLearningInitiative    | Project source | File source
def get_frame(self):

        ret, self.image = self.cap.read()

        cv2.imwrite(self.temporal, self.image)

        faceCascade = cv2.CascadeClassifier("classifier/haarcascade_frontalface_alt.xml")
        image = cv2.imread(self.temporal)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags = cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        print "Found {0} faces!".format(len(faces))

        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

        cv2.imwrite(self.faces, np.hstack((self.image, image)))
        return open(self.faces, 'rb').read()
Project: CodeLabs    Author: TheIoTLearningInitiative    | Project source | File source
def detect(self):
        faceCascade = cv2.CascadeClassifier(self.cascPath)
        image = cv2.imread(self.imageinput)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags = cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        print "Found {0} faces!".format(len(faces))

        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imwrite(self.imageoutput, image)
        cv2.waitKey(0)
Project: EyesInTheSky    Author: SherineSameh    | Project source | File source
def __init__(self, facePredictor):
        """
        Instantiate an 'AlignDlib' object.

        :param facePredictor: The path to dlib's facial landmark detector
        :type facePredictor: str
        :param OPENCV_Detector: The path to OpenCV's Haar cascade
        :type  OPENCV_Detector: str
        :param HOG_Detector: The path to dlib's HOG face detection model
        :type  HOG_Detector: str                
        """
        assert facePredictor is not None

        self.OPENCV_Detector =  cv2.CascadeClassifier("/home/pi/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_default.xml")
        self.HOG_Detector    = dlib.get_frontal_face_detector()
        self.predictor       = dlib.shape_predictor(facePredictor)
Project: face_detection    Author: PuchatekwSzortach    | Project source | File source
def check_opencv_accuracy(image_paths, bounding_boxes_map):

    detection_scores = []

    filters_path = os.path.expanduser("~/anaconda3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
    cascade_classifier = cv2.CascadeClassifier(filters_path)

    for path in tqdm.tqdm(image_paths):

        image = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)

        image_bounding_box = shapely.geometry.box(0, 0, image.shape[1], image.shape[0])
        face_bounding_box = bounding_boxes_map[os.path.basename(path)]

        # Only try to search for faces if they are larger than 1% of image. If they are smaller,
        # ground truth bounding box is probably incorrect
        if face.geometry.get_intersection_over_union(image_bounding_box, face_bounding_box) > 0.01:

            value = 1 if does_opencv_detect_face_correctly(image, face_bounding_box, cascade_classifier) else 0
            detection_scores.append(value)

    print("OpenCV accuracy is {}".format(np.mean(detection_scores)))
Project: Python_SelfLearning    Author: fukuit    | Project source | File source
def facedetect(file):
    """ Detect faces and eyes in the given image file with Haar cascades and display the result.
    Args:
        file : path of the image file to process
    """
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
    img = cv2.imread(file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for(ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()
Project: smart-cam    Author: smart-cam    | Project source | File source
def __init__(self):
        cfg = Config()
        # set up face detection models
        opencv_home = cfg.get("face_detection", "opencv_home")
        haarcascade = cfg.get("face_detection", "haarcascade")
        cascadePath = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
        self.faceCascade = cv2.CascadeClassifier('{0}/{1}'.format(opencv_home, haarcascade))

        self.recognizer = cv2.face.createLBPHFaceRecognizer()
        #self.recognizer = cv2.face.createEigenFaceRecognizer()
        #self.recognizer = cv2.face.createFisherFaceRecognizer()

        # the faces and Raspberry Pi locations we'll use
        self.names = ["james", "juanjo", "sayantan", "vineet"]
        self.rasp_names = ["FrontDoor", "Entrance", "Garage"]
        access = cfg.get("aws", "access_key_id")
        secret = cfg.get("aws", "secret_access_key")

        # connect to dynamo
        self.conn = boto.dynamodb2.connect_to_region('us-west-1', aws_access_key_id=access, aws_secret_access_key=secret)
        self.sc = Table('SMARTCAM', connection=self.conn)


    # read in training set and train the model
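The training method this comment refers to is not shown; a rough sketch of such a step, assuming one folder of grayscale face images per entry in self.names and that os, cv2 and numpy (as np) are imported as elsewhere in these examples (the directory layout and method name are assumptions):

def train(self, data_dir='training_data'):
        # Assumed layout: training_data/<name>/<image>.jpg, one folder per person in self.names
        images, labels = [], []
        for label, name in enumerate(self.names):
            person_dir = os.path.join(data_dir, name)
            for filename in os.listdir(person_dir):
                img = cv2.imread(os.path.join(person_dir, filename), cv2.IMREAD_GRAYSCALE)
                if img is not None:
                    images.append(img)
                    labels.append(label)
        self.recognizer.train(images, np.array(labels))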
Project: Face-Detection-using-Haarcascade    Author: KrUciFieR-Jr    | Project source | File source
def detect():
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

    cap = cv2.VideoCapture(0)
    if face_cascade.empty():
        print("Failed to load haarcascade_frontalface_default.xml")
    while True:
        ret , img = cap.read()
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray,1.3,5)
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h,x:x+w]
            roi_color = img[y:y+h,x:x+w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        cv2.imshow('img',img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    cv2.destroyAllWindows()
    cap.release()
Project: Learn-to-identify-similar-images    Author: MashiMaroLjc    | Project source | File source
def detectFaces(image_path):
    """
    Open the image at image_path and find all faces in it.
    Return a list of (left, top, right, bottom) tuples, one per detected face.
    """
    img = cv2.imread(image_path)

    face_cascade = cv2.CascadeClassifier("cvdata\\haarcascades\\haarcascade_frontalface_default.xml")
    if img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img 


    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(10,10),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    result = []

    for (x,y,width,height) in faces:
        result.append((x,y,x+width,y+height))
    return result
Project: Learn-to-identify-similar-images    Author: MashiMaroLjc    | Project source | File source
def detect_faces(image):

    face_cascade1 = cv2.CascadeClassifier(XML_PATH1)
    if image.ndim == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image 

    faces = face_cascade1.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(10,10),
                                     flags=cv2.CASCADE_SCALE_IMAGE)


    result=[]

    for (x,y,width,height) in faces:
        result.append((x,y,x+width,y+height))
    return result
Project: deepvisualminer    Author: pathbreak    | Project source | File source
def detect(img_file, detector_xml_path, dest_img_file):
    img = cv2.imread(img_file)

    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    detector = cv2.CascadeClassifier(detector_xml_path)

    min_size = (min(50, gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    hits = detector.detectMultiScale(gray_img, 1.1, 4, 0, min_size)
    #cv2.groupRectangles(hits, 2)
    print(hits)

    hits_img = np.copy(img)
    for (x,y,w,h) in hits:
        cv2.rectangle(hits_img, (x,y), (x+w, y+h), (0,0,255), 2)
    cv2.imwrite(dest_img_file, hits_img)
Project: ObjectExtractor    Author: ducthienbui97    | Project source | File source
def detect(cls,
               image,
               min_size=(50, 50),
               scale_factor=1.1,
               min_neighbors=5,
               cascade_file=_current_cascade):
        """ Return list of objects detected.
        image -- The image (numpy matrix) read by readImage function.
        min_size -- Minimum possible object size. Objects smaller than that are ignored (default (50,50)).
        scale_factor -- Specifying how much the image size is reduced at each image scale (default 1.1).
        min_neighbors -- Specifying how many neighbors each candidate rectangle should have to retain it (default 5).
        cascade_file  -- The path of cascade xml file use for detection (default current value)
        """

        classifier = cls._classifier
        if cascade_file != cls._current_cascade:
            classifier = cv2.CascadeClassifier(cascade_file)

        gray_image = cls.bgr_to_gray(image)
        return classifier.detectMultiScale(gray_image,
                                           scaleFactor=scale_factor,
                                           minNeighbors=min_neighbors,
                                           minSize=min_size)
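A hypothetical call matching the docstring above (the class name ObjectExtractor is inferred from the project name, and cv2.imread stands in for the project's readImage helper):

image = cv2.imread('group_photo.jpg')
objects = ObjectExtractor.detect(image, min_size=(30, 30), scale_factor=1.1, min_neighbors=5)
for (x, y, w, h) in objects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)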
Project: FaceRecoginition    Author: ProHiryu    | Project source | File source
def test_file():
    count = 1
    face_cascade = cv2.CascadeClassifier(
        '/usr/local/opt/opencv3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')

    argvs = sys.argv
    for argv in argvs[1:]:
        img = cv2.imread(argv)

        if img is not None:  # cv2.imread returns None if the file could not be read
            try:
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                print('convert succeed')
            except:
                print('can not convert to gray image')
                continue
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                f = cv2.resize(gray[y:(y + h), x:(x + w)], (128, 128))
                model = load_model('/Users/songheqi/model/model.h5')
                num, acc = predict(model, f, 128)
                name_list = read_name_list('/Users/songheqi/train_set/')
                print('The {} picture is '.format(count) +
                      name_list[num] + ' acc : ', acc)
                count += 1
Project: trojan    Author: Hackerl    | Project source | File source
def face_detect(path,file_name):
        #read the image
        img     = cv2.imread(path)
        #load the frontal-face Haar cascade
        cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")

        #detect faces in the image
        rects   = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20,20))

        if len(rects) == 0:
                return False

        rects[:, 2:] += rects[:, :2]

    # highlight the faces in the image
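The listing breaks off here; judging from the nearly identical PyHack faceDetect example earlier on this page, the remaining lines presumably draw the rectangles and write the result out (the output path below is an assumption):

        for x1, y1, x2, y2 in rects:
                cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)

        cv2.imwrite("faces/" + file_name, img)  # output directory is an assumption

        return True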
Project: async_face_recognition    Author: dpdornseifer    | Project source | File source
def _cascade_detect(self, raw_image):
        ''' use opencv cascades to recognize objects in the incoming images '''
        cascade = cv2.CascadeClassifier(self._cascade)
        image = np.asarray(bytearray(raw_image), dtype="uint8")

        gray_image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
        color_image = cv2.imdecode(image, cv2.IMREAD_ANYCOLOR)

        coordinates = cascade.detectMultiScale(
            gray_image,
            scaleFactor=1.15,
            minNeighbors=5,
            minSize=(30, 30)
        )

        for (x, y, w, h) in coordinates:
            cv2.rectangle(color_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            self._logger.debug("face recognized at: x: {}, y: {}, w: {}, h: {}".format(x, y, w, h))

        return color_image, self._tojson(coordinates)
Project: blog    Author: benhoff    | Project source | File source
def __init__(self,
                 face_classifier_filepath=None,
                 eye_classifier_filepath=None,
                 parent=None):

        super().__init__(parent)
        if face_classifier_filepath is None:
            face_classifier_filepath = get_haarcascade_filepath()
        if eye_classifier_filepath is None:
            eye_classifier_filepath = get_haarcascade_filepath('eyes')

        self.fisher_faces = cv2.face.createFisherFaceRecognizer()
        # Need an integer as the key, and an image as the value
        self._images = {}
        self._eye_classifier = cv2.CascadeClassifier(eye_classifier_filepath)
        # TODO: decide if I want to do this here, or just faces in.
        # self._face_classifier = cv2.CascadeClassifier(face_classifier_filepath)
Project: OpenCV_FaceDetect    Author: csuldw    | Project source | File source
def detectByClf(image_name, clf):
    img = cv2.imread(image_name)
    smiles_cascade = cv2.CascadeClassifier(clf)
    #if img has 3 channels it is a colour image and is converted to grayscale, otherwise it is already gray
    if img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img 

    print "start detecting..."    
    zones = smiles_cascade.detectMultiScale(gray, 1.3, 5)
    result = []
    for (x, y, width, height) in zones:
        result.append((x, y, x+width, y+height))
    print "end detecting."    
    return result


#save the detected regions to the outpath directory
Project: garden.facelock    Author: kivy-garden    | Project source | File source
def face_recognize(self):
        cap = cv2.VideoCapture(self.index)

        face_cascade = cv2.CascadeClassifier(self.cascade)
        '''
        face_cascade: the Haar cascade loaded from self.cascade, used for detection below.
        '''

        while(True):
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Converts the colour frame to grayscale.
            '''
            if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):

                print("Cascade found")

                self.dispatch('on_match')

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break

            else:
                print("Not recognized")

            cv2.imshow('frame', frame)
            #Comment the above statement not to show the camera screen
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Forcefully Closed")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
        cap.release()
Project: face    Author: MOluwole    | Project source | File source
def __init__(self, matric_num):
        WHITE = [255, 255, 255]

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        ID = NameFind.AddName(matric_num)
        Count = 0
        cap = cv2.VideoCapture(0)  # Camera object
        self.__trainer__ = None

        if not os.path.exists('dataSet'):
            os.makedirs('dataSet')

        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to grayScale
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                FaceImage = gray[y - int(h / 2): y + int(h * 1.5),
                            x - int(x / 2): x + int(w * 1.5)]  # The Face is isolated and cropped
                Img = (NameFind.DetectEyes(FaceImage))
                cv2.putText(gray, "FACE DETECTED", (x + (w / 2), y - 5), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)
                if Img is not None:
                    frame = Img  # Show the detected faces
                else:
                    frame = gray[y: y + h, x: x + w]
                cv2.imwrite("dataSet/" + matric_num.replace('/', '') + "." + str(ID) + "." + str(Count) + ".jpg", frame)
                Count = Count + 1
                # cv2.waitKey(300)
                cv2.imshow("CAPTURED PHOTO", frame)  # show the captured image
            cv2.imshow('Face Recognition System Capture Faces', gray)  # Show the video
            if Count == 150:
                Trainer()
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        print 'FACE CAPTURE FOR THE SUBJECT IS COMPLETE'
        cap.release()
        cv2.destroyAllWindows()
Project: face    Author: MOluwole    | Project source | File source
def __init__(self):

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        recognise = cv2.face.createEigenFaceRecognizer(15, 4000)  # creating EIGEN FACE RECOGNISER
        recognise.load("Recogniser/trainingDataEigan.xml")  # Load the training data

        # -------------------------     START THE VIDEO FEED ------------------------------------------
        cap = cv2.VideoCapture(0)  # Camera object
        # cap = cv2.VideoCapture('TestVid.wmv')   # Video object
        ID = 0
        while True:
            ret, img = cap.read()  # Read the camera object
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to gray
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                # ------------ BY CONFIRMING THE EYES ARE INSIDE THE FACE BETTER FACE RECOGNITION IS GAINED ------------------
                gray_face = cv2.resize((gray[y: y + h, x: x + w]), (110, 110))  # The Face is isolated and cropped
                eyes = eye_cascade.detectMultiScale(gray_face)
                for (ex, ey, ew, eh) in eyes:
                    ID, conf = recognise.predict(gray_face)  # Determine the ID of the photo
                    NAME = NameFind.ID2Name(ID, conf)
                    NameFind.DispID(x, y, w, h, NAME, gray)
            cv2.imshow('EigenFace Face Recognition System', gray)  # Show the video
            if cv2.waitKey(1) & 0xFF == ord('q'):  # Quit if the key is Q
                break
        cap.release()
        cv2.destroyAllWindows()
Project: Glidr    Author: muinmomin    | Project source | File source
def detect_face(image):
    faceCascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=3,
        minSize=(30, 30),
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    return faces if len(faces) else None
Project: hardware_demo    Author: llSourcell    | Project source | File source
def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # convert the image to grayscale, load the face cascade detector,
            # and detect faces in the image
            # Using data trained from here:
            #   http://www.pyimagesearch.com/2015/05/11/creating-a-face-detection-api-with-python-and-opencv-in-just-5-minutes/
            image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            detector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
            rects = detector.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5,minSize=(30, 30), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)

            # construct a list of bounding boxes from the detection
            self.rects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return
Project: bib-tagger    Author: KateRita    | Project source | File source
def findfaces(image):

    thisdirectory = os.path.dirname(os.path.realpath(__file__))

    haarcascadeFolder = os.path.join(thisdirectory,"haarcascades")
    cascPath = os.path.join(haarcascadeFolder, "haarcascade_frontalface_default.xml")

    #cascPath = os.path.join(haarcascadeFolder, "haarcascade_upperbody.xml")
    #cascPath = os.path.join(haarcascadeFolder, "haarcascade_fullbody.xml")
    #cascPath = os.path.join(haarcascadeFolder, "haarcascade_russian_plate_number.xml")

    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier(cascPath)

    # Read the image
    height, width, depth = image.shape
    scale = 1
    if (width > 1024):
        scale = 1024.0/width
        image = cv2.resize(image, None, fx=scale, fy=scale)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.05,
        minNeighbors=5,
        minSize=(30, 30),
    )

    return [scale_rect(face, 1/scale) for face in faces]
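scale_rect() is defined elsewhere in the project; given the call above, a plausible implementation simply rescales each component of the (x, y, w, h) rectangle (an assumption, not the project's actual code):

def scale_rect(rect, factor):
    # Scale an (x, y, w, h) rectangle back to the original image resolution
    x, y, w, h = rect
    return (int(x * factor), int(y * factor), int(w * factor), int(h * factor))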
Project: tensorflow-pi    Author: karaage0703    | Project source | File source
def face_detect():
    image = cv2.imread(face_filename)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade_f = cv2.CascadeClassifier(path.join(cascades_dir, 'haarcascade_frontalface_alt2.xml'))
    cascade_e = cv2.CascadeClassifier(path.join(cascades_dir, 'haarcascade_eye.xml'))

    facerect = cascade_f.detectMultiScale(image_gray, scaleFactor=1.08, minNeighbors=1, minSize=(200, 200))

    # print("face rectangle")
    # print(facerect)

    image_face = []
    if len(facerect) > 0:
        # filename numbering
        numb = 0
        tmp_size = 0
        for rect in facerect:
            x, y, w, h = rect
            # eyes in face?
            roi = image_gray[y: y + h, x: x + w]
            eyes = cascade_e.detectMultiScale(roi, scaleFactor=1.05, minSize=(20,20))
            if len(eyes) > 1:
                if h > tmp_size:
                    tmp_size = h
                    image_face = image[y:y+h, x:x+w]

    return image_face
Project: facial-emotion-detection-dl    Author: dllatas    | Project source | File source
def main(argv=None):  # pylint: disable=unused-argument
    face_cascade = cv2.CascadeClassifier("/home/neo/opencv-3.1.0/data/haarcascades/haarcascade_frontalface_default.xml")
    #image_path = "/home/neo/projects/deepLearning/data/image_exp2/"
    #image_path = "/home/neo/projects/deepLearning/data/ck_image_seq_10"
    image_path = "/home/neo/projects/deepLearning/data/amfed/happy"
    #dest_path = "/home/neo/projects/deepLearning/data/crop_faces_seq_10/"
    dest_path = "/home/neo/projects/deepLearning/data/amfed_faces"
    faces_to_detect = 1
    get_images(image_path, face_cascade, dest_path, faces_to_detect)
Project: RealtimeFacialEmotionRecognition    Author: sushant3095    | Project source | File source
def load_cascades():
    # Load Haar cascade files containing features
    cascPaths = ['models/haarcascades/haarcascade_frontalface_default.xml',
                 'models/haarcascades/haarcascade_frontalface_alt.xml',
                 'models/haarcascades/haarcascade_frontalface_alt2.xml',
                 'models/haarcascades/haarcascade_frontalface_alt_tree.xml',
                 'models/lbpcascades/lbpcascade_frontalface.xml']
    faceCascades = []
    for casc in cascPaths:
        faceCascades.append(cv.CascadeClassifier(casc))

    return faceCascades
Project: cvloop    Author: shoeffner    | Project source | File source
def __init__(self, hat_path=os.path.join(os.curdir, 'hat.png'),
                 cascade_path=os.path.join(
                     OPENCV_CASCADE_PATH, 'haarcascades',
                     'haarcascade_frontalface_default.xml'),
                 w_offset=1.3, x_offset=-20, y_offset=80, draw_box=False):
        # pragma pylint: disable=line-too-long
        """Initializes a `DrawHat` instance.

        Args:
            hat_path: The path to the hat file. Defaults to ./hat.png .
            cascade_path: The path to the face cascade file.
                          Defaults to
                          `cvloop.OPENCV_CASCADE_PATH/haarcascades/haarcascade_frontalface_default.xml`
            w_offset: Hat width additional scaling.
            x_offset: Number of pixels right to move hat.
            y_offset: Number of pixels down to move hat.
            draw_box: If True, draws boxes around detected faces.
        """
        # pragma pylint: enable=line-too-long
        self.w_offset = w_offset
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.draw_box = draw_box

        self.cascade = cv2.CascadeClassifier(cascade_path)
        self.hat = self.load_hat(hat_path)
Project: real_time_face_detection    Author: Snowapril    | Project source | File source
def main(FLAG):
    Model = SimpleModel(FLAG.input_dim, FLAG.hidden_dim, FLAG.output_dim, optimizer=tf.train.RMSPropOptimizer(FLAG.learning_rate), using_gpu=False)

    image_path = sys.argv[1]
    cascPath = "./haarcascade_frontalface_default.xml"

    faceCascade = cv2.CascadeClassifier(cascPath)

    image = cv2.imread(image_path)
    src_height, src_width, src_channels = image.shape
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.CASCADE_SCALE_IMAGE
    )

    for x, y, w, h in faces:
        print("faceLocation : ({},{}), width={}, height={}".format(x,y,w,h))
        cropped_image = gray[y:y+h, x:x+w]  # numpy slicing is [rows, cols], i.e. [y, x]
        resized_image = imresize(cropped_image, (FLAG.Width, FLAG.Height))
        resized_image = resized_image.flatten() / 255

        pred_feature = Model.predict(resized_image).flatten()
        pred_feature[::2] = pred_feature[::2] * w + x
        pred_feature[1::2] = pred_feature[1::2] * h + y

    result_img = draw_features_point_on_image(image, [pred_feature], src_width, src_height)
    print(pred_feature)
    for (x, y, w, h) in faces:
        cv2.rectangle(result_img, (x, y), (x+w, y+h), (0, 255, 0), 1)

    cv2.imshow('Result', result_img)
    cv2.imwrite("./result_img.png", result_img)
    cv2.waitKey(0)

    cv2.destroyAllWindows()
Project: CoffeeRobot    Author: ciauri    | Project source | File source
def detect():
    stream = io.BytesIO()

        #Get the picture (low resolution, so it should be quite fast)
        #Here you can also specify other parameters (e.g.:rotate the image)
    with picamera.PiCamera() as camera:
        camera.resolution = (700, 525)
        camera.capture(stream, format='jpeg')

    buff = np.fromstring(stream.getvalue(), dtype=np.uint8)

    #Now creates an OpenCV image
    img = cv2.imdecode(buff, 1)


    #img = cv2.imread('coffee.jpg')
    face_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/coffeePot.xml')
    eye_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/liquid.xml')

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.2, 500, minSize=(80,100))
    for (x,y,w,h) in faces:
        img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.2, 10, minSize=(70,50))
        return houghlines(roi_color,h)
Project: CoffeeRobot    Author: ciauri    | Project source | File source
def detect():
    stream = io.BytesIO()

        #Get the picture (low resolution, so it should be quite fast)
        #Here you can also specify other parameters (e.g.:rotate the image)
    with picamera.PiCamera() as camera:
        camera.resolution = (700, 525)
        camera.capture(stream, format='jpeg')

    buff = np.fromstring(stream.getvalue(), dtype=np.uint8)

    #Now creates an OpenCV image
    img = cv2.imdecode(buff, 1)


    #img = cv2.imread('coffee.jpg')
    face_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/coffeePot.xml')
    eye_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/liquid.xml')

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.2, 500, minSize=(80,100))
    for (x,y,w,h) in faces:
        img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.2, 10, minSize=(70,50))
        return houghlines(roi_color,x,y,w,h)
Project: Comicolorization    Author: DwangoMediaVillage    | Project source | File source
def __init__(self, paths, classifier_path, input_resize=None, output_resize=None, root='.', margin_ratio=0.3):
        """
        :param paths: image files :see: https://github.com/pfnet/chainer/blob/master/chainer/datasets/image_dataset.py
        :param classifier_path: XML of pre-trained face detector.
        You can find it from https://github.com/opencv/opencv/tree/master/data/haarcascades
        :param input_resize: set it if you want to resize image **before** running face detector
        :param output_resize: target size of output image
        """
        super().__init__(paths=paths, resize=input_resize, root=root)
        self.classifier = cv2.CascadeClassifier(classifier_path)
        self.margin_ratio = margin_ratio
        self.output_resize = output_resize
Project: party-pi    Author: JustinShenk    | Project source | File source
def load_detection_model(model_path='/usr/local/opt/opencv3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'):
    if not os.path.exists(model_path):
        # Try alternative file path
        local_cascade_path = 'face.xml'
        if not os.path.exists(local_cascade_path):
            raise NameError('File not found:', local_cascade_path)
        model_path = local_cascade_path
    detection_model = cv2.CascadeClassifier(model_path)
    return detection_model
Project: FacePoseEstimation    Author: abhisharma7    | Project source | File source
def __init__(self,option_type,path):

        self.face_cascade = cv2.CascadeClassifier("cascade/haarcascade_frontalface_default.xml")
        self.eye_cascade = cv2.CascadeClassifier("cascade/haarcascade_eye.xml")
        self.smile_cascade = cv2.CascadeClassifier("cascade/haarcascade_smile.xml")
        self.shape_predictor = "cascade/shape_predictor_68_face_landmarks.dat"
        self.facedetect = False
        self.functioncall = option_type
        self.sourcepath = path
        self.image_path = None
        self.video_path = None
        self.webcam_path = None
        self.main_function()