Python dlib module: rectangle() usage examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use dlib.rectangle().
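
Before the project examples, here is a minimal stand-alone sketch (not taken from any project below; the coordinate values are made up) of how a dlib.rectangle is constructed and queried:

import dlib

# A dlib.rectangle is an axis-aligned box given by integer
# left/top/right/bottom pixel coordinates (right/bottom are inclusive).
box = dlib.rectangle(left=10, top=20, right=110, bottom=140)

print(box.left(), box.top(), box.right(), box.bottom())   # 10 20 110 140
print(box.width(), box.height())                           # 101 121 (inclusive bounds)
print(box.center())                                        # box center as a dlib.point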

Project: CTT    Author: ZhouYzzz    | project source | file source
def correlation_tracker(self):
        flag = time.time()
        for track in self.tracks:
            if track.source == 'tracking':
                if track.updated:
                    print '===!!!==='
                    print 'Track updated!!!'
                    # use the .start_track method
                    track.tracker.start_track(self.img, \
                        dlib.rectangle(*track.updatebox))
                    track.updated = False
                else:
                    # use the .update method
                    track.tracker.update(self.img)

                rect = track.tracker.get_position()
                track.bbox = [int(rect.left()), \
                              int(rect.top()), \
                              int(rect.right()), \
                              int(rect.bottom())]

                print 'Track one frame', time.time()-flag
            else:
                # do nothing
                pass
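
Many of the CTT snippets in this listing pair dlib.rectangle with dlib.correlation_tracker in the same way; a minimal stand-alone sketch of that pattern (frame paths and the initial box are hypothetical placeholders):

import cv2
import dlib

tracker = dlib.correlation_tracker()
frames = ['0001.jpg', '0002.jpg', '0003.jpg']   # hypothetical frame files
init_box = (34, 40, 120, 180)                   # hypothetical (left, top, right, bottom)

for n, path in enumerate(frames):
    img = cv2.imread(path)
    if n == 0:
        # seed the tracker on the first frame with a dlib.rectangle
        tracker.start_track(img, dlib.rectangle(*init_box))
    else:
        # afterwards just feed new frames; update() returns a confidence score
        tracker.update(img)
    # get_position() returns a float-valued drectangle, so cast for drawing
    pos = tracker.get_position()
    bbox = [int(pos.left()), int(pos.top()), int(pos.right()), int(pos.bottom())]
    print(path, bbox)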
Project: facerecognition    Author: guoxiaolu    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: facerecognition    Author: guoxiaolu    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: facerecognition    Author: guoxiaolu    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: facerecognition    Author: guoxiaolu    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: faceNet_RealTime    Author: jack55436001    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: faceNet_RealTime    Author: jack55436001    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: icyface_api    Author: bupticybee    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: icyface_api    Author: bupticybee    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: facenet    Author: davidsandberg    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: facenet    Author: davidsandberg    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: EyesInTheSky    Author: SherineSameh    | project source | file source
def OPENCV_getAllFaceBoundingBoxes(self, rgbImg):
        """
        Find all face bounding boxes in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :return: All face bounding boxes in an image.
        :rtype: list of dlib.rectangle
        """
        assert rgbImg is not None

        lit=[]
        try:
            faces = self.OPENCV_Detector.detectMultiScale(rgbImg)

            for (x, y, w, h) in faces:
                lit.append(dlib.rectangle(int(x),int(y),int(x+w),int(y+h)))
            return lit

        except Exception as e:
            print("Warning: {}".format(e))
            # In rare cases, exceptions are thrown.
            return []
Project: EyesInTheSky    Author: SherineSameh    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.HOG_getAllFaceBoundingBoxes(rgbImg)
        if (len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: EyesInTheSky    Author: SherineSameh    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        return list(map(lambda p: (p.x, p.y), points.parts()))
Project: CTT    Author: ZhouYzzz    | project source | file source
def predictNewLocationsOfTracks(self):
        global log, idx
        if self.tracks:
            #log[idx,0] = self.tracks[0].consecutiveInvisibleCount
            #log[idx,:4] = self.tracks[1].bbox[:4]
            #log[idx,4] = self.tracks[1].consecutiveInvisibleCount

            if True:
                img = cv2.imread('/home/yaos11/zhouyz/CTT/img/bolt/%04d.jpg'%self.idx)
                assert img is not None
                # print '===!!!===!!!===', img.shape
                for track in self.tracks:
                    box = track.bbox
                    pt1 = (int(box[0]), int(box[1]))
                    pt2 = (int(box[2]), int(box[3]))
                    cv2.rectangle(img, pt1, pt2, (255,255,255), 3)

                cv2.imshow('Vedio', img)
                cv2.waitKey(1)
Project: CTT    Author: ZhouYzzz    | project source | file source
def CT_run(self, img, img_old):
        """ CT_run:
            -------
            When called, it will // CT.update // the Correlation Tracker once.
            if self.UPDATED, it will call // CT.start_track //.
        """
        if self.UPDATED:
            self.UPDATED = False # Ok, now updated
            self.CT.start_track(img_old, dlib.rectangle(*self.CT_box_update))
            self._CT_turn_new_to_old() # turn new to old

        self.CT.update(img)
        # get current position and update // CT_box //
        rect = self.CT.get_position()
        self.CT_box = [int(rect.left()),  int(rect.top()), \
                    int(rect.right()), int(rect.bottom())]
        # if self.UPDATED:
        self._CT_turn_new_to_old()

        return
Project: CTT    Author: ZhouYzzz    | project source | file source
def CT_run(self, img, img_old):
        """ CT_run:
            -------
            When called, it will // CT.update // the Correlation Tracker once.
            if self.UPDATED, it will call // CT.start_track //.
        """
        if self.UPDATED:
            self.UPDATED = False # Ok, now updated
            self.CT.start_track(img_old, dlib.rectangle(*self.CT_box_update))
            self._CT_turn_new_to_old() # turn new to old

        self.CT.update(img)
        # get current position and update // CT_box //
        rect = self.CT.get_position()
        self.CT_box = [int(rect.left()),  int(rect.top()), \
                    int(rect.right()), int(rect.bottom())]
        # if self.UPDATED:
Project: DelaunayVisualization-FacialWarp    Author: sneha-belkhale    | project source | file source
def get_landmarks(im):
    #get a bounding rectangle for the primary face in the image
    rects = cascade.detectMultiScale(im, 1.3, 5)
    # only get the x y w h coordinates of the first face detected
    x, y, w, h = rects[0].astype(long)
    # define a rectangle that will contain the face
    rect = dlib.rectangle(x, y, x + w, y + h)

    # use our predictor to find the facial points within our bounding box
    face_points = predictor(im, rect).parts()

    #save our results in an array
    landmarks = []
    for p in face_points:
        landmarks.append([p.x, p.y])
    return landmarks
Project: real-time-face-recognition    Author: iwantooxxoox    | project source | file source
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
        """
        Find the largest face bounding box in an image.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :return: The largest face bounding box in an image, or None.
        :rtype: dlib.rectangle
        """
        assert rgbImg is not None

        faces = self.getAllFaceBoundingBoxes(rgbImg)
        if (not skipMulti and len(faces) > 0) or len(faces) == 1:
            return max(faces, key=lambda rect: rect.width() * rect.height())
        else:
            return None
Project: real-time-face-recognition    Author: iwantooxxoox    | project source | file source
def findLandmarks(self, rgbImg, bb):
        """
        Find the landmarks of a face.

        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to find landmarks for.
        :type bb: dlib.rectangle
        :return: Detected landmark locations.
        :rtype: list of (x,y) tuples
        """
        assert rgbImg is not None
        assert bb is not None

        points = self.predictor(rgbImg, bb)
        #return list(map(lambda p: (p.x, p.y), points.parts()))
        return [(p.x, p.y) for p in points.parts()]

    #pylint: disable=dangerous-default-value
Project: FaceRecognition    Author: fonfonx    | project source | file source
def get_landmarks(img, detect_face=True):
    """ Return the landmarks of the image """
    w, h = img.shape[:2]

    # default values
    x = 0.08 * w
    y = 0.08 * h
    w = 0.84 * w
    h = 0.84 * h
    x, y, w, h = convert_to_long(x, y, w, h)

    if detect_face:
        rects = cascade.detectMultiScale(img, 1.3, 5)
        if len(rects) >= 1:
            rects = rects[np.argsort(rects[:, 3])[::-1]]
            x, y, w, h = rects[0].astype(long)
            x = x.item()
            y = y.item()
            w = w.item()
            h = h.item()
    rect = dlib.rectangle(x, y, x + w, y + h)
    return np.array([(p.x, p.y) for p in predictor(img, rect).parts()])
Project: FaceSwap    Author: Aravind-Suresh    | project source | file source
def get_facial_landmarks_from_mask(img, pts):
    rect = cv2.boundingRect(pts)
    rect = dlib.rectangle(rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
    return np.matrix([list(pt) for pt in pts]), rect
Project: FaceSwap    Author: Aravind-Suresh    | project source | file source
def toRoi(rect):
    return dlib.rectangle(0, 0, rect.right() - rect.left(), rect.bottom() - rect.top())
Project: facerecognition    Author: guoxiaolu    | project source | file source
def align(self, imgDim, rgbImg, bb,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align, \
                   given as a [left, top, right, bottom] list.
        :type bb: list of int
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None
        assert bb is not None

        bb_dlib = dlib.rectangle(left=bb[0], top=bb[1], right=bb[2], bottom=bb[3])
        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb_dlib)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
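
A hedged usage sketch for this align() variant (the image path, box values, and the align_tool instance name are placeholders; align_tool is assumed to be an instance of the class that defines align() above):

import cv2

bgr = cv2.imread('person.jpg')                   # hypothetical input image
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
face_box = [64, 48, 256, 240]                    # [left, top, right, bottom] from some detector

aligned = align_tool.align(96, rgb, face_box)    # 96x96 aligned RGB crop
if aligned is not None:
    cv2.imwrite('aligned.jpg', cv2.cvtColor(aligned, cv2.COLOR_RGB2BGR))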
Project: video_labeler    Author: hahnyuan    | project source | file source
def start(self,im,p0,p1):
        # p0 is leftupper position of the obj, p1 is rightbottom of the obj
        self.t.start_track(im, dlib.rectangle(p0[0],p0[1], p1[0], p1[1]))
Project: FaceSwap    Author: MarekKowalski    | project source | file source
def getFaceKeypoints(img, detector, predictor, maxImgSizeForDetection=640):
    imgScale = 1
    scaledImg = img
    if max(img.shape) > maxImgSizeForDetection:
        imgScale = maxImgSizeForDetection / float(max(img.shape))
        scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))


    # face detection
    dets = detector(scaledImg, 1)

    if len(dets) == 0:
        return None

    shapes2D = []
    for det in dets:
        faceRectangle = rectangle(int(det.left() / imgScale), int(det.top() / imgScale), int(det.right() / imgScale), int(det.bottom() / imgScale))

        # detect the facial landmark points
        dlibShape = predictor(img, faceRectangle)

        shape2D = np.array([[p.x, p.y] for p in dlibShape.parts()])
        # transpose so the shape is 2 x n instead of n x 2, which simplifies later computations
        shape2D = shape2D.T

        shapes2D.append(shape2D)

    return shapes2D
Project: EAC-Net    Author: wiibrew    | project source | file source
def get_faceim_shape(im):
    rect=dlib.rectangle(0,0,im.shape[1],im.shape[0])
    face_pts=np.ndarray((68,2))
    shape = predictor(im, rect)
    for i in range(68):
        face_pts[i,0]=shape.part(i).x
        face_pts[i,1]=shape.part(i).y
    return face_pts
Project: icyface_api    Author: bupticybee    | project source | file source
def get_landmarks(self,img,box=None,left=None,top=None,right=None,bottom=None):
        if box is not None:
            left,top,right,bottom = box
        left = np.long(left)
        top = np.long(top)
        right = np.long(right)
        bottom = np.long(bottom)
        bb = dlib.rectangle(left,top,right,bottom)
        landmarks = self.align_tool.findLandmarks(img,bb)
        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(self.landmarkIndices)
        return npLandmarks[npLandmarkIndices]
Project: icyface_api    Author: bupticybee    | project source | file source
def segment(self,img,box,landmarks):
        left,top,right,bottom = box
        left,top,right,bottom = int(left),int(top),int(right),int(bottom)
        bb = dlib.rectangle(left,top,right,bottom)
        H = cv2.getAffineTransform(landmarks,
                               self.imgDim * MINMAX_TEMPLATE[self.npLandmarkIndices] * self.scale + self.imgDim * (1 - self.scale)/2)
        thumbnail = cv2.warpAffine(img, H, (self.imgDim, self.imgDim))
        return [('2d-align',thumbnail)]
Project: faceSwapPython    Author: arijitx    | project source | file source
def get_landmarks(im):
    rects = cascade.detectMultiScale(im, 1.3,5)
    #if len(rects) > 1:
    #    raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces
    print len(rects)
    x,y,w,h =rects[0]
    rect=dlib.rectangle(x,y,x+w,y+h)
    return numpy.matrix([[p.x, p.y] for p in predictor(im, rect).parts()])
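
Several snippets above run an OpenCV Haar cascade and then wrap the hit in a dlib.rectangle. When dlib's own HOG detector is used instead, the detector already returns dlib.rectangle objects, so no conversion is needed; a stand-alone sketch (the model path and image file are placeholders):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')  # placeholder path

bgr = cv2.imread('face.jpg')                      # placeholder image
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
for rect in detector(rgb, 1):                     # each rect is already a dlib.rectangle
    shape = predictor(rgb, rect)
    landmarks = [(p.x, p.y) for p in shape.parts()]
    print(rect, len(landmarks))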
Project: experimenting-with-sort    Author: ZidanMusk    | project source | file source
def __init__(self,bbox,img):
    self.tracker = correlation_tracker()
    self.tracker.start_track(img,rectangle(long(bbox[0]),long(bbox[1]),long(bbox[2]),long(bbox[3])))
    self.confidence = 0. # measures how confident the tracker is! (a.k.a. correlation score)

    self.time_since_update = 0
    self.id = CorrelationTracker.count
    CorrelationTracker.count += 1
    self.hits = 0
    self.hit_streak = 0
    self.age = 0
Project: experimenting-with-sort    Author: ZidanMusk    | project source | file source
def update(self,bbox,img):
    self.time_since_update = 0
    self.hits += 1
    self.hit_streak += 1

    '''re-start the tracker with detected positions (if the detector was active)'''
    if bbox != []:
      self.tracker.start_track(img, rectangle(long(bbox[0]), long(bbox[1]), long(bbox[2]), long(bbox[3])))
    '''
    Note: another approach is to re-start the tracker only when the correlation score fall below some threshold
    i.e.: if bbox !=[] and self.confidence < 10.
    but this will reduce the algo. ability to track objects through longer periods of occlusions.
    '''
Project: face_recognition    Author: ageitgey    | project source | file source
def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css:  plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
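
For completeness, a small sketch of the opposite conversion (written here as an illustration mirroring the helper above), turning a dlib.rectangle back into the (top, right, bottom, left) tuple order:

import dlib

def _rect_to_css(rect):
    # inverse of _css_to_rect: dlib.rectangle -> (top, right, bottom, left)
    return rect.top(), rect.right(), rect.bottom(), rect.left()

box = dlib.rectangle(10, 20, 110, 140)   # left, top, right, bottom
print(_rect_to_css(box))                 # (20, 110, 140, 10)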
Project: CTT    Author: ZhouYzzz    | project source | file source
def diaplay(self):
        for track in self.tracks:
            box = track.bbox
            pt1 = (int(box[0]),int(box[1]))
            pt2 = (int(box[2]),int(box[3]))
            cv2.rectangle(self.oldimg,pt1,pt2,(255,255,255),3)

        cv2.imshow('Vedio', self.oldimg)
        cv2.waitKey(1)
Project: CTT    Author: ZhouYzzz    | project source | file source
def display(self):
        """docstring for display"""
        for track in self.tracks:
            if track.DISPLAY:
                bbox = track.box
                pt1 = (bbox[0],bbox[1])
                pt2 = (bbox[2],bbox[3])
                cv2.rectangle(self.img, pt1, pt2, (255,255,255), 2)
                # pass # we will display its bbox

        cv2.imshow('Vedio', self.img)
        cv2.waitKey(1)
        print 'SHOW'
        return
Project: CTT    Author: ZhouYzzz    | project source | file source
def display(self):
        """docstring for display"""
        for track in self.tracks:
            if track.DISPLAY:
                bbox = track.box
                pt1 = (bbox[0],bbox[1])
                pt2 = (bbox[2],bbox[3])
                cv2.rectangle(self.img, pt1, pt2, (255,255,255), 2)
                # pass # we will display its bbox

        cv2.imshow('Vedio', self.img)
        cv2.waitKey(1)
        print 'SHOW'
        return
Project: CTT    Author: ZhouYzzz    | project source | file source
def display(self):
        """docstring for display"""
        for track in self.tracks:
            if track.DISPLAY:
                bbox = track.box
                pt1 = (bbox[0],bbox[1])
                pt2 = (bbox[2],bbox[3])
                cv2.rectangle(self.img, pt1, pt2, (255,255,255), 2)
                # pass # we will display its bbox

        cv2.imshow('Vedio', self.img)
        cv2.waitKey(1)
        print 'SHOW'
        return
Project: CTT    Author: ZhouYzzz    | project source | file source
def showIMG(img, box, time=10):
    cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
            (255,255,255), 2)
    cv2.imshow('Image', img)
    cv2.waitKey(time)
Project: CTT    Author: ZhouYzzz    | project source | file source
def start_tracking():
    global updated
    global i, img_count
    global updtbox, oldbox, crtbox
    while i <= img_count:
        # get a new frame
        img = cv2.imread(imdb_path+'/%04d.jpg'%i)
        # update the tracker
        if updated:
            # tracker.start_track()
            tracker.start_track(img,
                dlib.rectangle(*updtbox))
            oldbox = updtbox
            updated = False
            # post a new frame
            trd_post = Thread(target=postIMG)
            trd_post.start()
        else:
            # tracker.update()
            tracker.update(img)

        rect = tracker.get_position()
        pt1 = [int(rect.left()), int(rect.top())]
        pt2 = [int(rect.right()), int(rect.bottom())]
        crtbox = pt1 + pt2
        f.write(str(crtbox)+'\n')
        if i%10 == 0:
            print 'frame', i, 'returns', crtbox
        if showimg:
            showIMG(img, crtbox, 2000)

        # next frame
        i += 1
Project: CTT    Author: ZhouYzzz    | project source | file source
def showIMG(img, box, time=10):
    cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
            (255,255,255), 2)
    cv2.imshow('Image', img)
    cv2.waitKey(time)
Project: CTT    Author: ZhouYzzz    | project source | file source
def start_tracking():
    global updated
    global i, img_count
    global updtbox, oldbox, crtbox
    while i <= img_count:
        # get a new frame
        img = cv2.imread(imdb_path+'/%04d.jpg'%i)
        # update the tracker
        if updated:
            # tracker.start_track()
            tracker.start_track(img,
                dlib.rectangle(*updtbox))
            oldbox = updtbox
            updated = False
            # post a new frame
            trd_post = Thread(target=postIMG)
            trd_post.start()
        else:
            # tracker.update()
            tracker.update(img)

        rect = tracker.get_position()
        pt1 = [int(rect.left()), int(rect.top())]
        pt2 = [int(rect.right()), int(rect.bottom())]
        crtbox = pt1 + pt2
        f.write(str(crtbox)+'\n')
        if i%10 == 0:
            print 'frame', i, 'returns', crtbox
        if showimg:
            showIMG(img, crtbox, 2000)

        # next frame
        i += 1
Project: CTT    Author: ZhouYzzz    | project source | file source
def init_firstframe_by_grdtruth():
    global updtbox, oldbox, crtbox
    gtfile = open(imdb_path+'/'+'groundtruth_rect.txt','r')
    line = gtfile.readline()
    points = line[:-1].split(',')
    points = map(int, points)
    points[2] += points[0]
    points[3] += points[1]
    gtfile.close()
    crtbox = points
    updtbox = crtbox
    oldbox = crtbox
    img = cv2.imread(imdb_path+'/0001.jpg')
    tracker.start_track(img, dlib.rectangle(*crtbox))
Project: CTT    Author: ZhouYzzz    | project source | file source
def showIMG(img, box, time=10):
    cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
            (255,255,255), 2)
    cv2.imshow('Image', img)
    cv2.waitKey(time)
Project: CTT    Author: ZhouYzzz    | project source | file source
def start_tracking():
    global updated
    global i, img_count
    global updtbox, oldbox, crtbox
    while i <= img_count:
        # get a new frame
        img = cv2.imread(imdb_path+'/%04d.jpg'%i)
        # update the tracker
        if updated:
            # tracker.start_track()
            tracker.start_track(img,
                dlib.rectangle(*updtbox))
            oldbox = updtbox
            updated = False
            # post a new frame
            trd_post = Thread(target=postIMG)
            trd_post.start()
        else:
            # tracker.update()
            tracker.update(img)

        rect = tracker.get_position()
        pt1 = [int(rect.left()), int(rect.top())]
        pt2 = [int(rect.right()), int(rect.bottom())]
        crtbox = pt1 + pt2
        f.write(str(crtbox)+'\n')
        if i%10 == 0:
            print 'frame', i, 'returns', crtbox
        if showimg:
            showIMG(img, crtbox, 2000)

        # next frame
        i += 1
Project: CTT    Author: ZhouYzzz    | project source | file source
def CT_run(self, img, img_old, img_last):
        """ CT_run:
            -------
            When called, it will // CT.update // the Correlation Tracker once.
            if self.UPDATED, it will call // CT.start_track //.
        """
        if self.UPDATED:
            if self.UPDATE_ASSIGN:
                self.CT.start_track(img_old, dlib.rectangle(*self.CT_box_update))
            else:
                self.CT.start_track(img_last, dlib.rectangle(*self.CT_box_update))

            self._CT_turn_new_to_old() # turn new to old

        self.CT.update(img)
        # get current position and update // CT_box //
        rect = self.CT.get_position()
        self.CT_box = [int(rect.left()),  int(rect.top()), \
                    int(rect.right()), int(rect.bottom())]
        if self.UPDATED:
            pass
            # _CT_turn_new_to_old()

        self.UPDATED = False

        return
Project: CTT    Author: ZhouYzzz    | project source | file source
def display(self):
        """docstring for display"""
        for track in self.tracks:
            if track.DISPLAY:
                bbox = track.box
                pt1 = (bbox[0],bbox[1])
                pt2 = (bbox[2],bbox[3])
                cv2.rectangle(self.img, pt1, pt2, (255,255,255), 2)
                # pass # we will display its bbox

        cv2.imshow('Vedio', self.img)
        cv2.waitKey(1)
        print 'SHOW'
        return
Project: Chakshu    Author: sachinkum    | project source | file source
def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object
    :param css:  plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
Project: DelaunayVisualization-FacialWarp    Author: sneha-belkhale    | project source | file source
def drawColoredTriangles(img, triangleList, disp):
    #sort the triangle list by distance from the top left corner in order to get a gradient effect when drawing triangles
    triangleList=sorted(triangleList, cmp=triDistanceSort)
    h, w, c = img.shape
    #get bounding rectangle points of image
    r = (0, 0, w, h)
    #iterate through and draw all triangles in the list
    for idx, t in enumerate(triangleList):
        #grab individual vertex points
        pt1 = [t[0], t[1]]
        pt2 = [t[2], t[3]]
        pt3 = [t[4], t[5]]
        #select a position for displaying the enumerated triangle value
        pos = (t[2], t[3])
        #create the triangle
        triangle = np.array([pt1, pt2, pt3], np.int32)
        #select a color in HSV!! (manipulate idx for cool color gradients)
        color = np.uint8([[[idx, 100, 200]]])
        #color = np.uint8([[[0, 0, idx]]])
        #convert color to BGR
        bgr_color = cv2.cvtColor(color, cv2.COLOR_HSV2BGR)
        color = (int(bgr_color[(0, 0, 0)]), int(bgr_color[(0, 0, 1)]), int(bgr_color[(0, 0, 2)]))

        #draw the triangle if it is within the image bounds
        if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):
            cv2.fillPoly(img, [triangle], color)
            # if display triangle number was selected, display the number.. this helps with triangle manipulation later
            if(disp==1):
                cv2.putText(img, str(idx), pos, fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, fontScale=0.3, color=(0, 0, 0))





######################################## example script ########################################
Project: cat-bbs    Author: aleju    | project source | file source
def to_dlib_rect(self):
        import dlib
        return dlib.rectangle(left=self.x1, right=self.x2, top=self.y1, bottom=self.y2)
Project: faceNet_RealTime    Author: jack55436001    | project source | file source
def align(self, imgDim, rgbImg, bb=None,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              skipMulti=False, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail