Python cv2 module: undistortPoints() example source code

We extracted the following 9 code examples from open-source Python projects to illustrate how to use cv2.undistortPoints().
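
Before the project examples, a minimal sketch of the call itself may help; the camera matrix K and distortion vector dist below are placeholders, not a real calibration. Called without P, undistortPoints() returns normalized image coordinates (on the z=1 plane); passing P=K maps the undistorted points back to pixel coordinates.

import cv2
import numpy as np

# Placeholder intrinsics; substitute a real calibration.
K = np.array([[600.0,   0.0, 320.0],
              [  0.0, 600.0, 240.0],
              [  0.0,   0.0,   1.0]])
dist = np.array([-0.2, 0.05, 0.0, 0.0])  # k1, k2, p1, p2

pts = np.array([[[100.0, 200.0]], [[320.0, 240.0]]], dtype=np.float32)  # shape (N, 1, 2)

normalized = cv2.undistortPoints(pts, K, dist)     # normalized coordinates on the z=1 plane
pixels = cv2.undistortPoints(pts, K, dist, P=K)    # undistorted pixel coordinates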

Project: esys-pbi    Author: fsxfreak
def undistort_unproject_pts(pts_uv, camera_matrix, dist_coefs):
    """
    Convert a set of 2D image coordinates to vectors in pinhole camera space,
    taking the camera intrinsics into account.
    UV is converted to normalized image space (think of a frustum with the
    image plane at z=1) and then undistorted; adding a z coordinate of 1
    yields vectors pointing from (0, 0, 0) to the undistorted image pixel.
    @return: ndarray with shape=(n, 3)
    """
    pts_uv = np.array(pts_uv, dtype=np.float32)  # cv2.undistortPoints expects float32/float64 input
    num_pts = pts_uv.size // 2

    pts_uv.shape = (num_pts, 1, 2)
    pts_uv = cv2.undistortPoints(pts_uv, camera_matrix, dist_coefs)
    pts_3d = cv2.convertPointsToHomogeneous(pts_uv)
    pts_3d.shape = (num_pts, 3)
    return pts_3d
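
A quick usage sketch of the function above; the intrinsics are placeholders, not values from the project:

import numpy as np

K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
dist = np.zeros(5)  # assume zero distortion for illustration

rays = undistort_unproject_pts([(100, 200), (320, 240)], K, dist)
print(rays.shape)  # (2, 3): one (x, y, 1) direction per input pixel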
Project: pybot    Author: spillai
def undistort_points(self, pts): 
        """
        Undistort image points using the camera matrix and distortion coefficients.

        Have to provide P matrix for appropriate scaling
        http://code.opencv.org/issues/1964#note-2
        """
        out = cv2.undistortPoints(pts.reshape(-1,1,2).astype(np.float32), self.K, self.D, P=self.K)
        return out.reshape(-1,2)
Project: LensCalibrator    Author: 1024jp
def calibrate_points(self, points):
        return cv2.undistortPoints(np.array([points]), self.camera_matrix,
                                   self.dist_coeffs,
                                   P=self.new_camera_matrix)[0]
Project: esys-pbi    Author: fsxfreak
def build_correspondance(self, visible_markers,camera_calibration,min_marker_perimeter,min_id_confidence):
        """
        - use all visible markers
        - fit a convex quadrangle around them
        - use the quadrangle verts to establish a perspective transform
        - map all markers into surface space
        - build up a list of found markers and their uv coords
        """

        all_verts = [m['verts'] for m in visible_markers if m['perimeter']>=min_marker_perimeter]
        if not all_verts:
            return
        all_verts = np.array(all_verts,dtype=np.float32)
        all_verts.shape = (-1,1,2)  # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
        # all_verts_undistorted_normalized is centered on the image center, flipped in y, in range [-1,1]
        all_verts_undistorted_normalized = cv2.undistortPoints(all_verts, camera_calibration['camera_matrix'],camera_calibration['dist_coefs']*self.use_distortion)
        hull = cv2.convexHull(all_verts_undistorted_normalized,clockwise=False)

        # simplify until we have exactly 4 verts
        if hull.shape[0]>4:
            new_hull = cv2.approxPolyDP(hull,epsilon=1,closed=True)
            if new_hull.shape[0]>=4:
                hull = new_hull
        if hull.shape[0]>4:
            curvature = abs(GetAnglesPolyline(hull,closed=True))
            most_acute_4_threshold = sorted(curvature)[3]
            hull = hull[curvature<=most_acute_4_threshold]


        # all_verts_undistorted_normalized space is flipped in y,
        # so we need to change the order of the hull vertices.
        hull = hull[[1,0,3,2],:,:]

        # now we need to roll the hull verts until we have the right orientation:
        # all_verts_undistorted_normalized space has its origin at the image center.
        # adding 1 to the coordinates puts the origin at the top left.
        distance_to_top_left = np.sqrt((hull[:,:,0]+1)**2+(hull[:,:,1]+1)**2)
        bot_left_idx = np.argmin(distance_to_top_left)+1
        hull = np.roll(hull,-bot_left_idx,axis=0)

        # based on these 4 verts we calculate the transformation into a (0,0)..(1,1) square space
        m_from_undistored_norm_space = m_verts_from_screen(hull)
        self.detected = True
        # map the marker vertices into the surface space (one can think of these as texture coordinates u,v)
        marker_uv_coords =  cv2.perspectiveTransform(all_verts_undistorted_normalized,m_from_undistored_norm_space)
        marker_uv_coords.shape = (-1,4,1,2) #[marker,marker...] marker = [ [[r,c]],[[r,c]] ]

        # build up a dict of discovered markers. Each with a history of uv coordinates
        for m,uv in zip(visible_markers,marker_uv_coords):
            try:
                self.markers[m['id']].add_uv_coords(uv)
            except KeyError:
                self.markers[m['id']] = Support_Marker(m['id'])
                self.markers[m['id']].add_uv_coords(uv)

        # average collection of uv correspondences across detected markers
        self.build_up_status = sum([len(m.collected_uv_coords) for m in self.markers.values()])/float(len(self.markers))

        if self.build_up_status >= self.required_build_up:
            self.finalize_correnspondance()
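
m_verts_from_screen is a project helper that is not shown here. Judging from how its result feeds cv2.perspectiveTransform, it presumably computes the homography from the four hull vertices to the unit square; a plausible sketch under that assumption:

import cv2
import numpy as np

def m_verts_from_screen(verts):
    # Hypothetical reconstruction: homography from the 4 hull verts
    # (shape (4, 1, 2), float32) onto the unit square in surface space.
    mapped_space = np.array(((0, 0), (1, 0), (1, 1), (0, 1)), dtype=np.float32).reshape(4, 1, 2)
    return cv2.getPerspectiveTransform(verts.astype(np.float32), mapped_space)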
Project: esys-pbi    Author: fsxfreak
def add_marker(self,marker,visible_markers,camera_calibration,min_marker_perimeter,min_id_confidence):
        '''
        Add a marker to the surface.
        '''
        res = self._get_location(visible_markers,camera_calibration,min_marker_perimeter,min_id_confidence,locate_3d=False)
        if res['detected']:
            support_marker = Support_Marker(marker['id'])
            marker_verts = np.array(marker['verts'])
            marker_verts.shape = (-1,1,2)
            marker_verts_undistorted_normalized = cv2.undistortPoints(marker_verts, camera_calibration['camera_matrix'],camera_calibration['dist_coefs']*self.use_distortion)
            marker_uv_coords =  cv2.perspectiveTransform(marker_verts_undistorted_normalized,res['m_from_undistored_norm_space'])
            support_marker.load_uv_coords(marker_uv_coords)
            self.markers[marker['id']] = support_marker
Project: camera_calibration_frontend    Author: groundmelon
def undistort_points(self, src):
        """
        :param src: N source pixel points (u,v) as an Nx2 matrix
        :type src: :class:`cvMat`

        Apply the post-calibration undistortion to the source points
        """

        return cv2.undistortPoints(src, self.intrinsics, self.distortion, R=self.R, P=self.P)
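
The R and P used here are a rectification rotation and projection matrix; in a typical stereo pipeline they come out of cv2.stereoRectify. A hedged sketch with placeholder calibration values:

import cv2
import numpy as np

K1 = K2 = np.array([[600.0, 0.0, 320.0],
                    [0.0, 600.0, 240.0],
                    [0.0, 0.0, 1.0]])
d1 = d2 = np.zeros(5)
R = np.eye(3)                   # rotation from camera 1 to camera 2 (placeholder)
T = np.array([0.1, 0.0, 0.0])   # baseline translation (placeholder)

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K1, d1, K2, d2, (640, 480), R, T)
# For the left camera, R1 and P1 would play the roles of self.R and self.P above.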
Project: imgProcessor    Author: radjkarl
def undistortPoints(self, points, keepSize=False):
        '''
        points --> list of (x,y) coordinates
        '''
        size = self.img.shape[:2][::-1]  # (width, height)
        cam = self.coeffs['cameraMatrix']
        d = self.coeffs['distortionCoeffs']

        pts = np.asarray(points, dtype=np.float32)
        if pts.ndim == 2:
            pts = np.expand_dims(pts, axis=0)

        (newCameraMatrix, roi) = cv2.getOptimalNewCameraMatrix(cam, d, size,
                                                               1, size)
        if not keepSize:
            # shift every point by the ROI origin
            xx, yy = roi[:2]
            pts[..., 0] -= xx
            pts[..., 1] -= yy

        return cv2.undistortPoints(pts, cam, d, P=newCameraMatrix)
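
The alpha=1 passed to getOptimalNewCameraMatrix keeps every source pixel in the undistorted image (at the cost of invalid border regions), and the returned roi marks the all-valid sub-rectangle that the keepSize branch compensates for. A small sketch of the alpha trade-off, with placeholder calibration values:

import cv2
import numpy as np

K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
d = np.array([-0.3, 0.1, 0.0, 0.0, 0.0])
size = (640, 480)  # (width, height)

newK_all, roi_all = cv2.getOptimalNewCameraMatrix(K, d, size, 1, size)      # alpha=1: keep all pixels
newK_valid, roi_valid = cv2.getOptimalNewCameraMatrix(K, d, size, 0, size)  # alpha=0: only valid pixels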
Project: calcam    Author: euratom-software
def normalise(self,x,y,field):

        if np.shape(x) != np.shape(y):
            raise ValueError("X and Y input arrays must be the same size!")

        if self.fit_params[field].model == 'fisheye' and opencv_major_version < 3:
            raise Exception('Fisheye model distortion calculation requires OpenCV 3 or newer! Your version is ' + cv2.__version__)

        # Flatten everything and create output array        
        oldshape = np.shape(x)
        x = np.reshape(x,np.size(x),order='F')
        y = np.reshape(y,np.size(y),order='F')

        input_points = np.zeros([x.size,1,2])
        for point in range(len(x)):
            input_points[point,0,0] = x[point]
            input_points[point,0,1] = y[point]

        if self.fit_params[field].model == 'perspective':
            undistorted = cv2.undistortPoints(input_points,self.fit_params[field].cam_matrix,self.fit_params[field].kc)
        elif self.fit_params[field].model == 'fisheye':
            undistorted = cv2.fisheye.undistortPoints(input_points,self.fit_params[field].cam_matrix,self.fit_params[field].kc)

        undistorted = np.swapaxes(undistorted,0,1)[0]

        return np.reshape(undistorted[:,0],oldshape,order='F') , np.reshape(undistorted[:,1],oldshape,order='F')


    # Get the sight-line direction(s) for given pixel coordinates, as unit vector(s) in the lab frame.
    # Input: x_pixel and y_pixel - array-like, x and y pixel coordinates (floats or arrays/lists of floats)
    # Optional inputs: ForceField - for split field cameras, get the sight line direction as if the pixel
    #                               was part of the specified subfield, even if it isn't really (int)
    #                  Coords - whether the input x_pixel and y_pixel values are in display or original 
    #                           coordinates (default Display; string either 'Display' or 'Original')
    # Output: Numpy array with 1 more dimension than the input x_pixels and y_pixels, but otherwise
    #         the same size and shape. The extra dimension indexes the 3 vector components, 0 = X, 1 = Y, 2 = Z.
    #         This is a unit vector in the CAD model coordinate system.
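
The sight-line function described above is not included in this snippet, but conceptually it builds on normalise(): a normalized coordinate pair (x, y) corresponds to the direction (x, y, 1) in camera coordinates, which is scaled to unit length and rotated into the lab frame. A minimal sketch under those assumptions, with rmat as a placeholder camera-to-lab rotation matrix:

import numpy as np

def sight_line_dirs(xn, yn, rmat):
    # xn, yn: normalized image coords (e.g. from normalise());
    # rmat: hypothetical 3x3 camera-to-lab rotation.
    dirs = np.stack([np.asarray(xn, dtype=float),
                     np.asarray(yn, dtype=float),
                     np.ones(np.shape(xn))], axis=-1)
    dirs /= np.linalg.norm(dirs, axis=-1, keepdims=True)  # unit vectors
    return dirs @ rmat.T  # rotate each ray into the lab frame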
Project: Stereo-Pose-Machines    Author: ppwwyyxx
def cpmtriangulate(pts):
    pts = pts[:,::-1,:]
    c1 = Camera(Camera.buildK(
        #[564.5793378468188, 562.7507396707426, 807.514870534443, 638.3417715516073]),
        [517.2287393382929, 525.0704075144106, 774.5928420208769, 591.6267497011125]),
        np.eye(3),
        np.zeros((3,1)))

    P2 = np.array([
        #[0.9987049032311739, 0.005161677353747297, -0.05061495183159303, 0.0975936934184045],
        #[-0.004173863762698966, 0.9997991391796881, 0.01960255485522677, 0.00181642123998563],
        #[0.05070596733431972, -0.01936590773647232, 0.9985258466831194, 0.006270242291420671]
        [0.9997257921076083, -0.002649760120974218, -0.023266270996836397, 0.09259191413077857],
        [0.0027696869905852674, 0.9999830374718406, 0.005123826943546446, -0.0014153393536146166],
        [0.02325229942975788, -0.005186862237858692, 0.9997161732368524, -0.0005078842007711909]
    ])
    c2 = Camera(Camera.buildK(
            [521.1484829793496, 526.8842673949462, 789.4993718170895, 576.4476020205435]),
        P2[:,:3],
        P2[:,3])
    #c1, c2 = read_temple_camera()
    npts = pts.shape[0]

    pts_coord = [[], []]
    for p in pts:
        p1, p2 = p[0], p[1]
        p1, p2 = coordinate_recover(p1), coordinate_recover(p2)
        pts_coord[0].append(p1)
        pts_coord[1].append(p2)

    pts1 = np.asarray(pts_coord[0]).reshape((npts,1,2)).astype('float32')
    pts2 = np.asarray(pts_coord[1]).reshape((npts,1,2)).astype('float32')
    if True:    # do undistort:
        pts1 = cv2.undistortPoints(pts1, c1.K,
                np.array([-0.23108204, 0.03321534, 0.00227184, 0.00240575]))
        #pts1 = cv2.undistortPoints(pts1, c1.K, np.array([0,0,0,0]))
        pts1 = pts1.reshape((npts,2))

        pts2 = cv2.undistortPoints(pts2, c2.K,
                np.array([-0.23146758, 0.03342091, 0.00133691, 0.00034652]))
        #pts2 = cv2.undistortPoints(pts2, c2.K, np.array([0,0,0,0]))
        pts2 = pts2.reshape((npts,2))

        c1 = Camera(np.eye(3),c1.R,c1.t)
        c2 = Camera(np.eye(3),c2.R,c2.t)
    else:
        pts1 = pts1[:,0,:]
        pts2 = pts2[:,0,:]

    pts3d = []
    for p1, p2 in zip(pts1, pts2):
        p3d = triangulate(c1, c2, p1, p2)
        pts3d.append(p3d)
    pts3d = np.array(pts3d)
    return pts3d
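
triangulate() and Camera are project helpers that are not shown. A plausible stand-in for the per-point call, using cv2.triangulatePoints and assuming Camera exposes K, R and t attributes:

import cv2
import numpy as np

def triangulate(c1, c2, p1, p2):
    # Hypothetical reconstruction: build 3x4 projection matrices from each
    # camera's assumed K, R, t and triangulate a single point pair.
    P1 = c1.K @ np.hstack([c1.R, np.reshape(c1.t, (3, 1))])
    P2 = c2.K @ np.hstack([c2.R, np.reshape(c2.t, (3, 1))])
    X = cv2.triangulatePoints(P1, P2,
                              np.reshape(p1, (2, 1)).astype(np.float64),
                              np.reshape(p2, (2, 1)).astype(np.float64))
    return (X[:3] / X[3]).ravel()  # homogeneous -> Euclidean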