Python cv2 module: Rodrigues() example source code

The following 29 code examples, extracted from open-source Python projects, illustrate how to use cv2.Rodrigues().

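As a quick orientation before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the round trip cv2.Rodrigues() performs between a rotation vector and a rotation matrix:

import numpy as np
import cv2

# Axis-angle rotation vector: 90 degrees about the z axis.
rvec = np.array([[0.0], [0.0], [np.pi / 2]])

# cv2.Rodrigues returns (output, jacobian); here: vector -> 3x3 matrix.
R, _ = cv2.Rodrigues(rvec)

# The same call performs the inverse conversion: 3x3 matrix -> vector.
rvec_back, _ = cv2.Rodrigues(R)
assert np.allclose(rvec, rvec_back)
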
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def rgb_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
            rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print("The world coordinate system's origin in camera's coordinate system:")
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
                data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
                yaml.dump(data, rgb_stream)


            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
                data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
                yaml.dump(data, depth_stream)


            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
Project: deeptracking    Author: lvsn
def detect(self, img):
        if len(img.shape) > 2:
            raise Exception("ChessboardDetector uses gray image as input")
        detection = None
        ret, corners = cv2.findChessboardCorners(img, self.chess_shape, None)
        if ret:
            ret, rvec, tvec = cv2.solvePnP(self.obj_points, corners, self.camera.matrix(), np.array([0, 0, 0, 0, 0]))
            # invert axis convention
            rvec[1] = -rvec[1]
            rvec[2] = -rvec[2]
            tvec[1] = -tvec[1]
            tvec[2] = -tvec[2]
            detection = Transform()
            detection.matrix[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
            detection.set_translation(tvec[0] / 1000, tvec[1] / 1000, tvec[2] / 1000)
        return detection
Project: ArkwoodAR    Author: rdmilligan
def render(self, markers):

        for marker in markers:

            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == MARKER_ONE:
                self.marker_one_textures[TEXTURE_FRONT] = cv2.flip(self._get_video_frame(), 0)
                self._draw_cube(marker_rotation, self.marker_one_textures)
            elif marker_name == MARKER_TWO:
                self._draw_cube(marker_rotation, self.marker_two_textures)

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()
Project: pybot    Author: spillai
def project(self, X, check_bounds=False, check_depth=False, return_depth=False, min_depth=0.1):
        """
        Project [Nx3] points onto 2-D image plane [Nx2]
        """
        R, t = self.to_Rt()
        rvec, _ = cv2.Rodrigues(R)
        proj, _ = cv2.projectPoints(X, rvec, t, self.K, self.D)
        x = proj.reshape(-1,2)

        if check_depth or return_depth:
            # Depths are needed both for the validity check and for return_depth
            depths = self.depth_from_projection(X)

        if check_depth:
            # Only return positive depths
            valid = depths >= min_depth
        else:
            valid = np.ones(len(x), dtype=bool)

        if check_bounds: 
            if self.shape is None: 
                raise ValueError('check_bounds cannot proceed. Camera.shape is not set')

            # Only return points within-image bounds
            valid = np.bitwise_and(
                valid, np.bitwise_and(
                    np.bitwise_and(x[:,0] >= 0, x[:,0] < self.shape[1]), \
                    np.bitwise_and(x[:,1] >= 0, x[:,1] < self.shape[0]))
            )

        if return_depth: 
            return x[valid], depths[valid]

        return x[valid]
Project: esys-pbi    Author: fsxfreak
def __init__(self, g_pool, eye_camera_to_world_matrix , camera_intrinsics ,cal_points_3d, cal_ref_points_3d, cal_gaze_points_3d, gaze_distance = 500 ):
        super().__init__(g_pool)
        self.eye_camera_to_world_matrix  =  eye_camera_to_world_matrix
        self.rotation_matrix = self.eye_camera_to_world_matrix[:3,:3]
        self.rotation_vector = cv2.Rodrigues( self.rotation_matrix  )[0]
        self.translation_vector  = self.eye_camera_to_world_matrix[:3,3]
        self.camera_matrix = camera_intrinsics['camera_matrix']
        self.dist_coefs = camera_intrinsics['dist_coefs']
        self.world_frame_size = camera_intrinsics['resolution']
        self.camera_intrinsics = camera_intrinsics
        self.cal_points_3d = cal_points_3d
        self.cal_ref_points_3d = cal_ref_points_3d
        self.cal_gaze_points_3d = cal_gaze_points_3d
        self.visualizer = Calibration_Visualizer(g_pool, camera_intrinsics ,cal_points_3d, cal_ref_points_3d,eye_camera_to_world_matrix, cal_gaze_points_3d)
        self.g_pool = g_pool
        self.gaze_pts_debug = []
        self.sphere = {}
        self.gaze_distance = gaze_distance
Project: FaceSwap    Author: MarekKowalski
def fun(self, x, params):
        # scaling
        s = params[0]
        # rotation
        r = params[1:4]
        # translation
        t = params[4:6]
        w = params[6:]

        mean3DShape = x[0]
        blendshapes = x[1]

        # rotation matrix from the rotation vector (Rodrigues' formula)
        R = cv2.Rodrigues(r)[0]
        P = R[:2]
        shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

        projected = s * np.dot(P, shape3D) + t[:, np.newaxis]

        return projected
Project: FaceSwap    Author: MarekKowalski
def jacobian(self, params, x, y):
        s = params[0]
        r = params[1:4]
        t = params[4:6]
        w = params[6:]

        mean3DShape = x[0]
        blendshapes = x[1]

        R = cv2.Rodrigues(r)[0]
        P = R[:2]
        shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

        nPoints = mean3DShape.shape[1]

        # nPoints * 2 because each point has two dimensions (x and y)
        jacobian = np.zeros((nPoints * 2, self.nParams))

        jacobian[:, 0] = np.dot(P, shape3D).flatten()

        stepSize = 10e-4
        step = np.zeros(self.nParams)
        step[1] = stepSize
        jacobian[:, 1] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
        step = np.zeros(self.nParams)
        step[2] = stepSize
        jacobian[:, 2] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()
        step = np.zeros(self.nParams)
        step[3] = stepSize
        jacobian[:, 3] = ((self.fun(x, params + step) - self.fun(x, params)) / stepSize).flatten()

        jacobian[:nPoints, 4] = 1
        jacobian[nPoints:, 5] = 1

        startIdx = self.nParams - self.nBlendshapes
        for i in range(self.nBlendshapes):
            jacobian[:, i + startIdx] = s * np.dot(P, blendshapes[i]).flatten()

        return jacobian

    # not used
Project: FaceSwap    Author: MarekKowalski
def getShape3D(mean3DShape, blendshapes, params):
    # scaling
    s = params[0]
    # rotation
    r = params[1:4]
    # translation
    t = params[4:6]
    w = params[6:]

    # rotation matrix from the rotation vector (Rodrigues' formula)
    R = cv2.Rodrigues(r)[0]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

    shape3D = s * np.dot(R, shape3D)
    shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]

    return shape3D
Project: imgProcessor    Author: radjkarl
def imgPointToWorldCoord(XY, rvec,
                         tvec, cameraMatrix, zconst=0):
    '''
    @returns 3d object coords

    :param XY: tuple (ix, iy) of 2d point coordinates in the image
    :param zconst: height above the image plane (if 0, points lie on the plane)

    http://answers.opencv.org/question/62779/image-coordinate-to-world-coordinate-opencv/
    and
    http://stackoverflow.com/questions/12299870/computing-x-y-coordinate-3d-from-image-point
    '''
    (ix, iy) = XY
    uvPoint = np.array([ix.ravel(), iy.ravel(), np.ones(
        shape=ix.size)]).reshape(3, ix.size)

    R = cv2.Rodrigues(rvec)[0]

    iR = inv(R)
    iC = inv(cameraMatrix)

    t = iR.dot(iC).dot(uvPoint)
    t2 = iR.dot(tvec)
    s = (zconst + t2[2]) / t[2]

    objP = (iR.dot(s * iC.dot(uvPoint) - tvec))
    return objP
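
A hypothetical usage sketch for imgPointToWorldCoord() with a synthetic pinhole camera looking straight down at the z=0 plane (identity rotation, one unit above the plane); inv in the function body is numpy.linalg.inv from the project module. The principal point should back-project to the world origin:

import numpy as np

cameraMatrix = np.array([[500.,   0., 320.],
                         [  0., 500., 240.],
                         [  0.,   0.,   1.]])
rvec = np.zeros((3, 1))              # identity rotation
tvec = np.array([[0.], [0.], [1.]])  # camera one unit above the plane

ix = np.array([320.])                # pixel at the principal point
iy = np.array([240.])
print(imgPointToWorldCoord((ix, iy), rvec, tvec, cameraMatrix))  # ~ [0, 0, 0]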
Project: imgProcessor    Author: radjkarl
def rvec2euler(rvec):
    return mat2euler(cv2.Rodrigues(rvec)[0], axes='rzxy')
Project: imgProcessor    Author: radjkarl
def euler2rvec(a0, a1, a2):
    return cv2.Rodrigues(euler2mat(a0, a1, a2, axes='rzxy'))[0]
Project: imgProcessor    Author: radjkarl
def objectOrientation(self):
        tvec, r = self.pose()
        eulerAngles = mat2euler(cv2.Rodrigues(r)[0], axes='rzxy')

        tilt = eulerAngles[1]
        rot = eulerAngles[0]
        dist = tvec[2, 0]  # take only the depth component (rather than np.linalg.norm(tvec))
        return dist, tilt, rot
Project: imgProcessor    Author: radjkarl
def camera_position(self, pose=None):
        '''
        returns camera position in world coordinates using self.rvec and self.tvec
        from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
        '''
        if pose is None:
            pose = self.pose()
        t, r = pose
        return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t)
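
The closing expression is the standard camera-centre formula C = -R^T t. A small hedged check of that identity, independent of the class above:

import numpy as np
import cv2

# Hypothetical pose: camera rotated 180 degrees about y, one unit along z.
rvec = np.array([[0.0], [np.pi], [0.0]])
tvec = np.array([[0.0], [0.0], [1.0]])

R, _ = cv2.Rodrigues(rvec)
C = -R.T.dot(tvec)   # camera centre in world coordinates
print(C.ravel())     # ~ [0, 0, 1]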
Project: BAR4Py    Author: bxtkezhan
def cvt2ModelView(self, Rx=np.array([[1,0,0],[0,-1,0],[0,0,-1]])):
        R, _ = cv2.Rodrigues(self.rvec)
        Rt = np.hstack((R, self.tvec))
        M = np.eye(4)
        M[:3,:] = np.dot(Rx, Rt)
        return M
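
The Rx matrix flips the y and z axes, mapping OpenCV's camera frame (x right, y down, z forward) onto OpenGL's (x right, y up, z toward the viewer). A hedged, standalone sketch of the same conversion, including the column-major transpose that the getGLM() snippets further down also apply:

import numpy as np
import cv2

rvec = np.array([[0.1], [0.2], [0.3]])  # example pose, e.g. from cv2.solvePnP
tvec = np.array([[0.0], [0.0], [5.0]])

R, _ = cv2.Rodrigues(rvec)
Rt = np.hstack((R, tvec))
Rx = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])

M = np.eye(4)
M[:3, :] = np.dot(Rx, Rt)
glm = M.T.flatten()  # OpenGL expects column-major storage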
Project: deeptracking    Author: lvsn
def detect(self, img):
        self.likelihood = self.detector.detect_mat(img)
        detection = None
        if self.likelihood > 0.1:
            # get board and draw it
            board = self.detector.getDetectedBoard()

            rvec = board.Rvec.copy()
            tvec = board.Tvec.copy()
            matrix = cv2.Rodrigues(rvec)[0]
            rodrigues = mat2euler(matrix)
            detection = Transform.from_parameters(tvec[0], -tvec[1], -tvec[2], rodrigues[0], -rodrigues[1], -rodrigues[2])
        return detection
Project: perception    Author: BerkeleyAutomation
def register_chessboard(self):
        """
        Compute the transformation between robot pose and camera pose using chessboard registration
        techniques

        Returns
        -------
        numpy array:
            (3,4) shape array that is the computed transformation

        """

        p_mm = self.get_corners_mm()
        c_mm = self.find_chessboard()

        dist_coef = np.zeros(4)

        ret,r_vec,t_vec = cv2.solvePnP(p_mm.T,c_mm.T,self.C,dist_coef)

        r_mat,j = cv2.Rodrigues(r_vec)

        trans = np.zeros([3,4])

        trans[0:3,0:3] = r_mat
        trans[:,3] = t_vec[:,0]

        self.trans = trans
        return trans
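
A hedged follow-up sketch: applying the returned (3,4) transform to a point given in chessboard coordinates via a homogeneous multiply. apply_trans is a hypothetical helper, not part of the project:

import numpy as np

def apply_trans(trans, point_3d):
    # trans is the (3,4) [R|t] array returned by register_chessboard()
    p_h = np.append(np.asarray(point_3d, dtype=float), 1.0)  # homogeneous (4,)
    return trans.dot(p_h)  # the point expressed in camera coordinates, shape (3,)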
Project: calcam    Author: euratom-software
def get_cam_to_lab_rotation(self,field=None):

        if field is None:
            if self.nfields > 1:
                raise Exception('This calibration has multiple sub-fields; you must specify a pixel location to get_cam_to_lab_rotation!')
            else:
                field = 0

        rotation_matrix = np.matrix(cv2.Rodrigues(self.fit_params[field].rvec)[0])

        return rotation_matrix.transpose()


    # Given pixel coordinates x,y, return the NORMALISED
    # coordinates of the corresponding un-distorted points.
Project: esys-pbi    Author: fsxfreak
def __init__(self, g_pool, eye_camera_to_world_matrix0, eye_camera_to_world_matrix1 , camera_intrinsics , cal_points_3d = [],cal_ref_points_3d = [], cal_gaze_points0_3d = [], cal_gaze_points1_3d = [] ):
        super().__init__(g_pool)

        self.eye_camera_to_world_matricies  =  eye_camera_to_world_matrix0 , eye_camera_to_world_matrix1
        self.rotation_matricies  =  eye_camera_to_world_matrix0[:3,:3],eye_camera_to_world_matrix1[:3,:3]
        self.rotation_vectors = cv2.Rodrigues( eye_camera_to_world_matrix0[:3,:3]  )[0] , cv2.Rodrigues( eye_camera_to_world_matrix1[:3,:3]  )[0]
        self.translation_vectors  = eye_camera_to_world_matrix0[:3,3] , eye_camera_to_world_matrix1[:3,3]


        self.cal_points_3d = cal_points_3d
        self.cal_ref_points_3d = cal_ref_points_3d

        self.cal_gaze_points0_3d = cal_gaze_points0_3d #save for debug window
        self.cal_gaze_points1_3d = cal_gaze_points1_3d #save for debug window

        self.camera_matrix = camera_intrinsics['camera_matrix']
        self.dist_coefs = camera_intrinsics['dist_coefs']
        self.world_frame_size = camera_intrinsics['resolution']
        self.camera_intrinsics = camera_intrinsics
        self.visualizer = Calibration_Visualizer(g_pool,
                                                world_camera_intrinsics=camera_intrinsics ,
                                                cal_ref_points_3d = cal_points_3d,
                                                cal_observed_points_3d = cal_ref_points_3d,
                                                eye_camera_to_world_matrix0=  self.eye_camera_to_world_matricies[0],
                                                cal_gaze_points0_3d = cal_gaze_points0_3d,
                                                eye_camera_to_world_matrix1=  self.eye_camera_to_world_matricies[1],
                                                cal_gaze_points1_3d =  cal_gaze_points1_3d)
        self.g_pool = g_pool
        self.gaze_pts_debug0 = []
        self.gaze_pts_debug1 = []
        self.intersection_points_debug = []
        self.sphere0 = {}
        self.sphere1 = {}
        self.last_gaze_distance = 500.
Project: camera_calibration_frontend    Author: groundmelon
def handle_monocular(self, msg):

        (image, camera) = msg
        gray = self.mkgray(image)
        C = self.image_corners(gray)
        if C is not None:
            linearity_rms = self.mc.linear_error(C, self.board)

            # Add in reprojection check
            image_points = C
            object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
            dist_coeffs = numpy.zeros((4, 1))
            camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2]  ],
                                           [ camera.P[4], camera.P[5], camera.P[6]  ],
                                           [ camera.P[8], camera.P[9], camera.P[10] ] ] )
            ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
            # Convert rotation into a 3x3 Rotation Matrix
            rot3x3, _ = cv2.Rodrigues(rot)
            # Reproject model points into image
            object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans)
            reprojected_h = camera_matrix * object_points_world
            reprojected   = (reprojected_h[0:2, :] / reprojected_h[2, :])
            reprojection_errors = image_points.squeeze().T - reprojected

            reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape))

            # Print the results
            print("Linearity RMS Error: %.3f Pixels      Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
        else:
            print('no chessboard')
Project: AR-BXT-AR4Python    Author: GeekLiB
def getGLM(self):
        R, _ = cv2.Rodrigues(self.RVEC)
        Rt = np.hstack((R,self.TVEC))

        Rx = np.array([[1,0,0],[0,-1,0],[0,0,-1]])

        M = np.eye(4)
        M[:3,:] = np.dot(Rx,Rt)

        m = M.T
        return m.flatten()


    # Debug code.
Project: FaceSwap    Author: MarekKowalski
def drawCross(img, params, center=(100, 100), scale=30.0):
    R = cv2.Rodrigues(params[1:4])[0]

    points = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    points = np.dot(points, R.T)
    points2D = points[:, :2]

    points2D = (points2D * scale + center).astype(np.int32)

    cv2.line(img, (center[0], center[1]), (points2D[0, 0], points2D[0, 1]), (255, 0, 0), 3)
    cv2.line(img, (center[0], center[1]), (points2D[1, 0], points2D[1, 1]), (0, 255, 0), 3)
    cv2.line(img, (center[0], center[1]), (points2D[2, 0], points2D[2, 1]), (0, 0, 255), 3)
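
A hypothetical usage sketch for drawCross(), assuming the FaceSwap parameter layout [s, r0, r1, r2, ...] in which params[1:4] is the rotation vector:

import numpy as np
import cv2

img = np.zeros((200, 200, 3), dtype=np.uint8)
params = np.array([1.0, 0.0, 0.3, 0.0])  # mild rotation about the y axis
drawCross(img, params)
cv2.imwrite('axes.png', img)  # illustrative output path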
Project: cvcalib    Author: Algomorph
def __init__(self, transform=None, inverse_transform=None, rotation=None, translation_vector=None):
        if translation_vector is not None:
            if not isinstance(translation_vector, np.ndarray):
                translation_vector = np.array(translation_vector)
            if translation_vector.shape != (3, 1):
                translation_vector = translation_vector.reshape(3, 1)
        if rotation is not None:
            if not isinstance(rotation, np.ndarray):
                rotation = np.array(rotation)
            if rotation.size == 9:
                rotation_vector = cv2.Rodrigues(rotation)[0]
                rotation_matrix = rotation
            elif rotation.size == 3:
                rotation_matrix = cv2.Rodrigues(rotation)[0]
                rotation_vector = rotation
            else:
                raise ValueError(
                    "Wrong rotation size: {:d}. Expecting a 3-length vector or 3x3 matrix.".format(rotation.size))
        if transform is None:
            if translation_vector is None or rotation is None:
                raise (ValueError("Expecting either the transform matrix or both the rotation & translation vector"))
            self.T = np.vstack((np.append(rotation_matrix, translation_vector, axis=1), [0, 0, 0, 1]))
        else:
            self.T = transform
            if translation_vector is None:
                translation_vector = transform[0:3, 3].reshape(3, 1)
            if rotation is None:
                rotation_matrix = transform[0:3, 0:3]
                rotation_vector = cv2.Rodrigues(rotation_matrix)[0]
        if inverse_transform is None:
            rot_mat_inv = rotation_matrix.T
            inverse_translation = -rot_mat_inv.dot(translation_vector)
            inverse_transform = np.vstack((np.append(rot_mat_inv, inverse_translation, 1), [0, 0, 0, 1]))

        self.rmat = rotation_matrix
        self.tvec = translation_vector
        self.rvec = rotation_vector
        self.T_inv = inverse_transform
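
A hypothetical usage of the constructor above (the enclosing class name is not shown in the snippet; Pose is assumed here): either rotation representation yields the same homogeneous transform T.

import numpy as np
import cv2

rvec = np.array([0.0, np.pi / 2, 0.0])
R = cv2.Rodrigues(rvec)[0]
t = [1.0, 2.0, 3.0]

pose_a = Pose(rotation=rvec, translation_vector=t)  # 3-vector form
pose_b = Pose(rotation=R, translation_vector=t)     # 3x3 matrix form
assert np.allclose(pose_a.T, pose_b.T)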
Project: blmath    Author: bodylabs
def compute_r(self):
            import cv2
            return cv2.Rodrigues(self.rt.r)[0]
Project: blmath    Author: bodylabs
def compute_dr_wrt(self, wrt):
            import cv2
            if wrt is self.rt:
                return cv2.Rodrigues(self.rt.r)[1].T
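
cv2.Rodrigues also returns the Jacobian of the conversion, which the node above transposes so that rows correspond to the 9 matrix entries and columns to the 3 rotation-vector components. A quick hedged shape check:

import numpy as np
import cv2

rvec = np.array([[0.1], [0.2], [0.3]])
R, jac = cv2.Rodrigues(rvec)
print(R.shape, jac.shape)  # (3, 3) (3, 9)
print(jac.T.shape)         # (9, 3), as used in compute_dr_wrt above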
Project: blmath    Author: bodylabs
def rotate_to_xz_plane(points, normal=None):
    '''
    Rotates points to the x-z plane. If the initial center
    of mass is not to within 1e-5 of the origin, we
    translate it to the origin.

    Returns (r, R, p0):

        - r is the rotated image of points,
        - R is the rotation matrix
        - p0 is the translation factor (can be None)

    '''
    import cv2
    from blmath.geometry.transform.translation import translation

    if points is None or not len(points): # pylint: disable=len-as-condition
        raise ValueError('Some points are required')

    center = np.mean(points, axis=0)

    if np.linalg.norm(center) > 1e-5:
        translated, p0 = translation(points)
    else:
        translated, p0 = points, None

    if normal is None:
        normal = estimate_normal(points)

    e_2 = np.array([0., 1., 0.])
    theta = np.arccos(np.dot(e_2, normal))

    if min(abs(theta - np.pi), abs(theta)) < 1e-5:
        # cross product will degenerate
        # to zero vector in this case
        r_axis = np.array([1., 0., 0.])
    else:
        r_axis = np.cross(normal, e_2)

    r_axis /= np.linalg.norm(r_axis)

    R = cv2.Rodrigues(theta * r_axis)[0]
    rotated = np.dot(translated, R.T)

    return (rotated, R, p0)
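
A hypothetical usage (blmath itself must be importable, since the function imports its translation helper): a unit square in the xy plane, with normal [0, 0, 1], is rotated into the xz plane, so the y coordinates vanish.

import numpy as np

points = np.array([[-1., -1., 0.], [1., -1., 0.], [1., 1., 0.], [-1., 1., 0.]])
rotated, R, p0 = rotate_to_xz_plane(points, normal=[0., 0., 1.])
assert np.allclose(rotated[:, 1], 0.0)  # points now lie in the x-z plane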
Project: robotics1project    Author: pchorak
def get_object_pose(self, marker, tag):
        # AR Tag Dimensions
        objPoints = np.zeros((4, 3), dtype=np.float64)
        objPoints[0,0] = -1.0*tag[1]/2.0
        objPoints[0,1] = tag[1]/2.0
        objPoints[0,2] = 0.0
        objPoints[1,0] = tag[1]/2.0
        objPoints[1,1] = tag[1]/2.0
        objPoints[1,2] = 0.0
        objPoints[2,0] = tag[1]/2.0
        objPoints[2,1] = -1*tag[1]/2.0
        objPoints[2,2] = 0.0
        objPoints[3,0] = -1*tag[1]/2.0
        objPoints[3,1] = -1*tag[1]/2.0
        objPoints[3,2] = 0.0

        # Get each corner of the tags
        imgPoints = np.zeros((4, 2), dtype=np.float64)
        for i in range(4):
            imgPoints[i, :] = marker.contours[i, 0, :]


        camPos = np.zeros((3, 1))
        camRot = np.zeros((3, 1))

        # SolvePnP
        retVal, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, self.camMatrix, self.distCoeff)
        Rca, b = cv2.Rodrigues(rvec)
        Pca = tvec

        return [Pca, Rca]
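
A hedged follow-up: packing the returned rotation and translation into a 4x4 homogeneous camera-from-tag transform, a common next step. pose_to_homogeneous is a hypothetical helper, not part of the project:

import numpy as np

def pose_to_homogeneous(Pca, Rca):
    T = np.eye(4)
    T[:3, :3] = Rca          # 3x3 rotation matrix from cv2.Rodrigues
    T[:3, 3] = Pca.ravel()   # translation from cv2.solvePnP
    return T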
Project: calcam    Author: euratom-software
def get_pupilpos(self,x_pixels=None,y_pixels=None,field=None,Coords='Display'):

        if x_pixels is not None or y_pixels is not None:
            if x_pixels is None or y_pixels is None:
                raise ValueError("X pixels and Y pixels must both be specified!")
            if np.shape(x_pixels) != np.shape(y_pixels):
                raise ValueError("X pixels array and Y pixels array must be the same size!")

            # Flatten everything and create output array        
            oldshape = np.shape(x_pixels)

            x_pixels = np.reshape(x_pixels,np.size(x_pixels),order='F')
            y_pixels = np.reshape(y_pixels,np.size(y_pixels),order='F')
            out = np.zeros(np.shape(x_pixels) + (3,))

            # Convert pixel coords if we need to
            if Coords.lower() == 'original':
                x_pixels,y_pixels = self.transform.original_to_display_coords(x_pixels,y_pixels)

            # Identify which sub-field each pixel is in
            pointfield = self.fieldmask[y_pixels.round().astype(int),x_pixels.round().astype(int)]
            if np.size(pointfield) == 1:
                pointfield = [pointfield]

            for i in range(np.size(x_pixels)):
                out[i,:] = self.get_pupilpos(field=pointfield[i])

            return np.reshape(out,oldshape + (3,),order='F')

        else:

            if field is None:
                if self.nfields == 1:
                    field = 0
                else:
                    raise Exception('This calibration has multiple sub-fields; you must specify a pixel location to get_pupilpos!')


            rotation_matrix = np.matrix(cv2.Rodrigues(self.fit_params[field].rvec)[0])
            CamPos = np.matrix(self.fit_params[field].tvec)
            CamPos = - (rotation_matrix.transpose() * CamPos)
            CamPos = np.array(CamPos)

            return np.array([CamPos[0][0],CamPos[1][0],CamPos[2][0]])


    # Get X and Y field of view of the camera (X and Y being horizontal and vertical of the detector)
    # Optional inputs: field - for cameras with split fields-of-view, the sub-field number to get the FOV of (int).
    #                  FullChipWithoutDistortion - ignores distortion and any split field-of-view, just returns the FOV
    #                                              for the full chip as if there was no distortion. Used in Calcam.Render() but probably not useful otherwise (bool).
    # Output: 2-element tuple with field of view in degrees: (horiz, vert) (tuple of floats)
Project: calcam    Author: euratom-software
def set_extrinsics(self,campos,upvec,camtar=None,view_dir=None,opt_axis = False):


        if camtar is not None:
            w = np.squeeze(np.array(camtar) - np.array(campos))
        elif view_dir is not None:
            w = view_dir
        else:
            raise ValueError('Either viewing target or view direction must be specified!')

        w = w / np.sqrt(np.sum(w**2))
        v = upvec / np.sqrt(np.sum(upvec**2))


        # opt_axis specifies whether the input campos or camtar is where we should
        # point the optical axis or the view centre. By default we assume the view centre.
        # In this case, we need another rotation matrix to rotate from the image centre
        # view direction (given) to the optical axis direction (stored). This applies for
        # intrinsics where the perspective centre is not at the detector centre.
        if not opt_axis:
            R = np.zeros([3,3])
            # Optical axis direction in the camera coordinate system
            uz = np.array(self.normalise(self.image_display_shape[0]/2.,self.image_display_shape[1]/2.,0) + (1.,))
            uz = uz / np.sqrt(np.sum(uz**2))

            ux = np.array([1,0,0]) - uz[0]*uz
            ux = ux / np.sqrt(np.sum(ux**2))
            uy = np.cross(ux,uz)
            R[:,0] = ux
            R[:,1] = -uy
            R[:,2] = uz

            R = np.matrix(R).T

        else:
            # If we are pointing the optical axis, set this extra
            # rotation to be the identity.
            R = np.matrix([[1,0,0],[0,1,0],[0,0,1]])


        u = np.cross(w,v)

        Rmatrix = np.zeros([3,3])
        Rmatrix[:,0] = u
        Rmatrix[:,1] = -v
        Rmatrix[:,2] = w
        Rmatrix = np.matrix(Rmatrix)


        Rmatrix = Rmatrix * R
        campos = np.matrix(campos)
        if campos.shape[0] < campos.shape[1]:
            campos = campos.T

        self.fit_params[0].tvec = -Rmatrix.T * campos
        self.fit_params[0].rvec = -cv2.Rodrigues(Rmatrix)[0]