Python cv2 module: solvePnP() example source code

We extracted the following 12 code examples from open-source Python projects to illustrate how to use cv2.solvePnP().
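
Before the project snippets, here is a minimal, self-contained sketch of the basic call. The object points, image points, and intrinsics below are made-up values for illustration and do not come from any of the projects: cv2.solvePnP() takes at least four 3D object points, their corresponding 2D image points, the camera matrix, and the distortion coefficients, and returns a rotation vector (Rodrigues form) plus a translation vector that map object coordinates into the camera frame.

import cv2
import numpy as np

# Hypothetical data: a 10 cm square planar target and made-up pinhole
# intrinsics (fx = fy = 800, principal point at (320, 240)).
object_points = np.array([[0.0, 0.0, 0.0],
                          [0.1, 0.0, 0.0],
                          [0.1, 0.1, 0.0],
                          [0.0, 0.1, 0.0]], dtype=np.float32)
image_points = np.array([[300.0, 220.0],
                         [380.0, 225.0],
                         [385.0, 305.0],
                         [295.0, 300.0]], dtype=np.float32)
camera_matrix = np.array([[800.0, 0.0, 320.0],
                          [0.0, 800.0, 240.0],
                          [0.0, 0.0, 1.0]], dtype=np.float32)
dist_coeffs = np.zeros(5)  # assume no lens distortion

ret, rvec, tvec = cv2.solvePnP(object_points, image_points,
                               camera_matrix, dist_coeffs)
if ret:
    # rvec is a Rodrigues rotation vector; expand it to a 3x3 rotation matrix.
    rmat, _ = cv2.Rodrigues(rvec)
    print("rvec:\n", rvec)
    print("tvec:\n", tvec)

The same pattern recurs throughout the snippets below, usually followed by cv2.Rodrigues() to convert rvec into a rotation matrix or cv2.projectPoints() to reproject 3D points for visualization; cv2.solvePnPRansac() appears where outlier rejection is wanted.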

Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_calib_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)
            return  # skip this frame if the image conversion failed

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num,x_num))
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (y_num,x_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)

            data = {'corners': ir_corners.tolist()}
            with open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w") as depth_stream:
                yaml.dump(data, depth_stream)

            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)
            return  # skip this frame if the image conversion failed

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
            with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
                yaml.dump(data, depth_stream)


            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def rgb_callback(self,data):
        try:
            img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return  # skip this frame if the image conversion failed

        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        ret, corners = cv2.findChessboardCorners(img, (x_num,y_num))
        cv2.imshow('img',img)
        cv2.waitKey(5)
        if ret == True:
            cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
            tempimg = img.copy()
            cv2.drawChessboardCorners(tempimg, (x_num,y_num), corners,ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
            print("rvecs:")
            print(rvec)
            print("tvecs:")
            print(tvec)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)

            imgpts = np.int32(imgpts).reshape(-1,2)

            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]),[255,0,0],4)  #BGR
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]),[0,255,0],4)
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]),[0,0,255],4)

            cv2.imshow('img',tempimg)
            cv2.waitKey(5)
Project: deeptracking    Author: lvsn
def detect(self, img):
        if len(img.shape) > 2:
            raise Exception("ChessboardDetector uses gray image as input")
        detection = None
        ret, corners = cv2.findChessboardCorners(img, self.chess_shape, None)
        if ret:
            ret, rvec, tvec = cv2.solvePnP(self.obj_points, corners, self.camera.matrix(), np.array([0, 0, 0, 0, 0]))
            # invert axis convention
            rvec[1] = -rvec[1]
            rvec[2] = -rvec[2]
            tvec[1] = -tvec[1]
            tvec[2] = -tvec[2]
            detection = Transform()
            detection.matrix[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
            detection.set_translation(tvec[0] / 1000, tvec[1] / 1000, tvec[2] / 1000)
        return detection
Project: BAR4Py    Author: bxtkezhan
def calculateExtrinsics(self, cameraParameters):
        '''
        Inputs:
        cameraParameters is a CameraParameters object

        Calculates the rotation vector and translation vector

        >>> marker.calculateExtrinsics(cameraParameters)
        >>> print(marker.rvec, marker.tvec)
        '''
        object_points = np.zeros((4,3), dtype=np.float32)
        object_points[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)
        # Test Code.
        # object_points[:] -= 0.5
        marker_points = self.corners
        if marker_points is None: raise TypeError('The marker.corners is None')
        camera_matrix = cameraParameters.camera_matrix
        dist_coeff = cameraParameters.dist_coeff
        ret, rvec, tvec = cv2.solvePnP(object_points, marker_points,
                                       camera_matrix, dist_coeff)
        if ret: self.rvec, self.tvec = rvec, tvec
        return ret
Project: imgProcessor    Author: radjkarl
def _poseFromQuad(self, quad=None):
        '''
        Estimate the pose of the object plane from the given quad, setting:
            self.rvec -> rotation vector
            self.tvec -> translation vector
        '''
        if quad is None:
            quad = self.quad
        if quad.ndim == 3:
            quad = quad[0]
        # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
        # Find the rotation and translation vectors.
        img_pn = np.ascontiguousarray(quad[:, :2],
                                      dtype=np.float32).reshape((4, 1, 2))

        obj_pn = self.obj_points - self.obj_points.mean(axis=0)
        retval, rvec, tvec = cv2.solvePnP(
            obj_pn,
            img_pn,
            self.opts['cameraMatrix'],
            self.opts['distCoeffs'],
            flags=cv2.SOLVEPNP_P3P  # because exactly four points are given
        )
        if not retval:
            print("Couln't estimate pose")
        return tvec, rvec
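
The snippet above passes flags=cv2.SOLVEPNP_P3P because exactly four point correspondences are available. A minimal sketch of that flag with made-up points and intrinsics (not taken from the imgProcessor project) might look like this:

import cv2
import numpy as np

# Hypothetical 20 cm square target in the z=0 plane and made-up intrinsics.
obj_pn = np.array([[-0.1, -0.1, 0.0],
                   [ 0.1, -0.1, 0.0],
                   [ 0.1,  0.1, 0.0],
                   [-0.1,  0.1, 0.0]], dtype=np.float32)
img_pn = np.array([[240.0, 200.0],
                   [400.0, 205.0],
                   [405.0, 350.0],
                   [235.0, 345.0]], dtype=np.float32).reshape((4, 1, 2))
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])
# SOLVEPNP_P3P requires exactly four correspondences.
retval, rvec, tvec = cv2.solvePnP(obj_pn, img_pn, K, np.zeros(5),
                                  flags=cv2.SOLVEPNP_P3P)

With more than four points, the default iterative solver or cv2.solvePnPRansac() (as in the Kinect snippets above) is the usual choice.
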
Project: page_dewarp    Author: mzucker
def get_default_params(corners, ycoords, xcoords):

    # page width and height
    page_width = np.linalg.norm(corners[1] - corners[0])
    page_height = np.linalg.norm(corners[-1] - corners[0])
    rough_dims = (page_width, page_height)

    # our initial guess for the cubic has no slope
    cubic_slopes = [0.0, 0.0]

    # object points of flat page in 3D coordinates
    corners_object3d = np.array([
        [0, 0, 0],
        [page_width, 0, 0],
        [page_width, page_height, 0],
        [0, page_height, 0]])

    # estimate rotation and translation from four 2D-to-3D point
    # correspondences
    _, rvec, tvec = cv2.solvePnP(corners_object3d,
                                 corners, K, np.zeros(5))

    span_counts = [len(xc) for xc in xcoords]

    params = np.hstack((np.array(rvec).flatten(),
                        np.array(tvec).flatten(),
                        np.array(cubic_slopes).flatten(),
                        ycoords.flatten()) +
                       tuple(xcoords))

    return rough_dims, span_counts, params
Project: headlights    Author: Team395
def findCameraParameters(cont):
    objectPoints = np.array([[0,0,0],[10.5,0,0],[10.5,5,0],[0,5,0]], dtype=np.float32)
    imagePoints=cont
    ret, rvec, tvec = cv2.solvePnP(objectPoints, imagePoints, np.load("mtx.npy"), np.load("dist.npy"))
    print(tvec)
    print(rvec)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_callback(self,data):
        try:
            gray = self.mkgray(data)
        except CvBridgeError as e:
            print(e)
            return  # skip this frame if the image conversion failed

        # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num))
        cv2.imshow('img',gray)
        cv2.waitKey(5)
        if ret == True:
            cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
            tempimg = gray.copy()
            cv2.drawChessboardCorners(tempimg, (x_num,y_num), corners,ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, depth_mtx, depth_dist)
            print("rvecs:")
            print(rvec)
            print("tvecs:")
            print(tvec)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, depth_mtx, depth_dist)

            imgpts = np.int32(imgpts).reshape(-1,2)

            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]),[255,0,0],4)  #BGR
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]),[0,255,0],4)
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]),[0,0,255],4)

            cv2.imshow('img',tempimg)
            cv2.waitKey(5)
Project: perception    Author: BerkeleyAutomation
def register_chessboard(self):
        """
        Compute the transformation between robot pose and camera pose using chessboard registration
        techniques

        Returns
        -------
        numpy array:
            (3,4) shape array that is the computed transformation

        """

        p_mm = self.get_corners_mm()
        c_mm = self.find_chessboard()

        dist_coef = np.zeros(4)

        ret,r_vec,t_vec = cv2.solvePnP(p_mm.T,c_mm.T,self.C,dist_coef)

        r_mat,j = cv2.Rodrigues(r_vec)

        trans = np.zeros([3,4])

        trans[0:3,0:3] = r_mat
        trans[:,3] = t_vec[:,0]

        self.trans = trans
        return trans
Project: camera_calibration_frontend    Author: groundmelon
def handle_monocular(self, msg):

        (image, camera) = msg
        gray = self.mkgray(image)
        C = self.image_corners(gray)
        if C is not None:
            linearity_rms = self.mc.linear_error(C, self.board)

            # Add in reprojection check
            image_points = C
            object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
            dist_coeffs = numpy.zeros((4, 1))
            camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2]  ],
                                           [ camera.P[4], camera.P[5], camera.P[6]  ],
                                           [ camera.P[8], camera.P[9], camera.P[10] ] ] )
            ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3, _ = cv2.Rodrigues(rot)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans)
            reprojected_h = camera_matrix * object_points_world
            reprojected   = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points.squeeze().T - reprojected

            reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape))

            # Print the results
            print("Linearity RMS Error: %.3f Pixels      Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
        else:
            print('no chessboard')
Project: robotics1project    Author: pchorak
def get_object_pose(self, marker, tag):
        # AR Tag Dimensions
        objPoints = np.zeros((4, 3), dtype=np.float64)
        objPoints[0,0] = -1.0*tag[1]/2.0
        objPoints[0,1] = tag[1]/2.0
        objPoints[0,2] = 0.0
        objPoints[1,0] = tag[1]/2.0
        objPoints[1,1] = tag[1]/2.0
        objPoints[1,2] = 0.0
        objPoints[2,0] = tag[1]/2.0
        objPoints[2,1] = -1*tag[1]/2.0
        objPoints[2,2] = 0.0
        objPoints[3,0] = -1*tag[1]/2.0
        objPoints[3,1] = -1*tag[1]/2.0
        objPoints[3,2] = 0.0

        # Get each corner of the tags
        imgPoints = np.zeros((4, 2), dtype=np.float64)
        for i in range(4):
            imgPoints[i, :] = marker.contours[i, 0, :]


        camPos = np.zeros((3, 1))
        camRot = np.zeros((3, 1))

        # SolvePnP
        retVal, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, self.camMatrix, self.distCoeff)
        Rca, b = cv2.Rodrigues(rvec)
        Pca = tvec

        return [Pca, Rca]