Python cv2 module: cornerSubPix() example source code

The code examples below, extracted from open-source Python projects, illustrate how to use cv2.cornerSubPix().
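For orientation before the project excerpts, here is a minimal sketch of the typical call pattern, assuming a photograph of a chessboard with 9x6 inner corners at a hypothetical path board.png: detect coarse corners with cv2.findChessboardCorners, then refine them to sub-pixel accuracy on the grayscale image with cv2.cornerSubPix.

import cv2

img = cv2.imread('board.png')            # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

found, corners = cv2.findChessboardCorners(gray, (9, 6), None)
if found:
    # stop after 30 iterations or once a corner moves by less than 0.001 px
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # refines `corners` in place; OpenCV 3+ also returns the refined array
    corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    cv2.drawChessboardCorners(img, (9, 6), corners, found)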

Project: pycalibrate    Author: reconstruct-on-the-fly
def find_points(images):
    # PATTERN_SIZE (a (9, 6) board), DEBUG and DISPLAY_SCALE are module-level
    # constants in the original project; cv and np are cv2 and numpy.
    obj_points = []
    img_points = []

    # Assumed object points relation
    a_object_point = np.zeros((PATTERN_SIZE[1] * PATTERN_SIZE[0], 3),
                              np.float32)
    a_object_point[:, :2] = np.mgrid[0:PATTERN_SIZE[0],
                                     0:PATTERN_SIZE[1]].T.reshape(-1, 2)

    # Termination criteria for sub pixel corners refinement
    stop_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER,
                     30, 0.001)

    print('Finding points ', end='')
    debug_images = []
    for (image, color_image) in images:
        found, corners = cv.findChessboardCorners(image, PATTERN_SIZE, None)
        if found:
            obj_points.append(a_object_point)
            cv.cornerSubPix(image, corners, (11, 11), (-1, -1), stop_criteria)
            img_points.append(corners)

            print('.', end='')
        else:
            print('-', end='')

        if DEBUG:
            cv.drawChessboardCorners(color_image, PATTERN_SIZE, corners, found)
            debug_images.append(color_image)

        sys.stdout.flush()

    if DEBUG:
        display_images(debug_images, DISPLAY_SCALE)

    print('\nWas able to find points in %s images' % len(img_points))
    return obj_points, img_points


# images is a list of tuples: (gray_image, color_image)
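A hypothetical driver for find_points(), only to show how those (gray_image, color_image) tuples might be assembled; the folder path is illustrative, and PATTERN_SIZE, DEBUG, DISPLAY_SCALE and the cv alias come from the project's module scope.

import glob
import cv2 as cv

images = []
for path in sorted(glob.glob('calib_images/*.png')):   # hypothetical folder
    color = cv.imread(path)
    gray = cv.cvtColor(color, cv.COLOR_BGR2GRAY)
    images.append((gray, color))

obj_points, img_points = find_points(images)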
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def rgb_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
            rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print("The world coordinate system's origin in camera's coordinate system:")
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
            data = {'rmat':self.rgb_rmat.tolist(), 'tvec':self.rgb_tvec.tolist()}
            yaml.dump(data, rgb_stream)


            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_calib_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num,x_num))
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (y_num,x_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)

            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w")
            data = {'corners':ir_corners.tolist()}
            yaml.dump(data, depth_stream)

            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def ir_callback(self,data):
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
            data = {'rmat':self.ir_rmat.tolist(), 'tvec':self.ir_tvec.tolist()}
            yaml.dump(data, depth_stream)


            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
Project: camera_calibration_frontend    Author: groundmelon
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get the corners of a particular chessboard in an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners)
Project: imgProcessor    Author: radjkarl
def _findChessboard(self):
        # Find the chess board corners
        flags = cv2.CALIB_CB_FAST_CHECK
        if self._detect_sensible:
            flags = (cv2.CALIB_CB_FAST_CHECK |
                     cv2.CALIB_CB_ADAPTIVE_THRESH |
                     cv2.CALIB_CB_FILTER_QUADS |
                     cv2.CALIB_CB_NORMALIZE_IMAGE)

        (didFindCorners, corners) = cv2.findChessboardCorners(
            self.img, self.opts['size'], flags=flags
        )
        if didFindCorners:
            # further refine corners; corners is updated in place
            cv2.cornerSubPix(self.img, corners, (11, 11), (-1, -1),
                             # termination criteria for corner estimation for
                             # chessboard method
                             (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                              30, 0.001)
                             )  # returns None
        return didFindCorners, corners
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def rgb_calib_callback(self,data):
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (y_num,x_num),None)

        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            cv2.cornerSubPix(gray,rgb_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (y_num,x_num), rgb_corners,rgb_ret)

            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "w")
            data = {'corners':rgb_corners.tolist()}
            yaml.dump(data, rgb_stream)

            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh
def rgb_callback(self,data):
        try:
            img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        ret, corners = cv2.findChessboardCorners(img, (x_num,y_num))
        cv2.imshow('img',img)
        cv2.waitKey(5)
        if ret == True:
            cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
            tempimg = img.copy()
            cv2.drawChessboardCorners(tempimg, (x_num,y_num), corners,ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
            print("rvecs:")
            print(rvec)
            print("tvecs:")
            print(tvec)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)

            imgpts = np.int32(imgpts).reshape(-1,2)

            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]),[255,0,0],4)  #BGR
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]),[0,255,0],4)
            cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]),[0,0,255],4)

            cv2.imshow('img',tempimg)
            cv2.waitKey(5)
Project: cvcalib    Author: Algomorph
def load_frame_images(self):
        """
        Load images (or image pairs) from self.full_frame_folder_path
        """
        print("Loading frames from '{0:s}'".format(self.full_frame_folder_path))
        all_files = [f for f in os.listdir(self.full_frame_folder_path)
                     if osp.isfile(osp.join(self.full_frame_folder_path, f)) and f.endswith(".png")]
        all_files.sort()

        usable_frame_ct = sys.maxsize

        frame_number_sets = []

        for video in self.videos:
            # assume matching numbers in corresponding left & right files
            files = [f for f in all_files if f.startswith(video.name)]
            files.sort()  # added to be explicit

            cam_frame_ct = 0
            frame_numbers = []
            for ix_pair in range(len(files)):
                frame = cv2.imread(osp.join(self.full_frame_folder_path, files[ix_pair]))
                frame_number = int(re.search(r'\d\d\d\d', files[ix_pair]).group(0))
                frame_numbers.append(frame_number)
                found, corners = cv2.findChessboardCorners(frame, self.board_dims)
                if not found:
                    raise ValueError("Could not find corners in image '{0:s}'".format(files[ix_pair]))
                grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), self.criteria_subpix)
                video.image_points.append(corners)
                video.usable_frames[frame_number] = ix_pair
                cam_frame_ct += 1
            usable_frame_ct = min(usable_frame_ct, cam_frame_ct)
            frame_number_sets.append(frame_numbers)

        if len(self.videos) > 1:
            # check that all cameras have the same frame number sets
            if len(frame_number_sets[0]) != len(frame_number_sets[1]):
                raise ValueError(
                    "There are some non-paired frames in folder '{0:s}'".format(self.full_frame_folder_path))
            for i_fn in range(len(frame_number_sets[0])):
                fn0 = frame_number_sets[0][i_fn]
                fn1 = frame_number_sets[1][i_fn]
                if fn0 != fn1:
                    raise ValueError("There are some non-paired frames in folder '{0:s}'." +
                                     " Check frame {1:d} for camera {2:s} and frame {3:d} for camera {4:s}."
                                     .format(self.full_frame_folder_path,
                                             fn0, self.videos[0].name,
                                             fn1, self.videos[1].name))

        for i_frame in range(usable_frame_ct):
            self.object_points.append(self.board_object_corner_set)
        return usable_frame_ct
Project: cvcalib    Author: Algomorph
def add_corners(self, i_frame, subpixel_criteria, frame_folder_path=None,
                    save_image=False, save_chekerboard_overlay=False):
        grey_frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        cv2.cornerSubPix(grey_frame, self.current_image_points, (11, 11), (-1, -1), subpixel_criteria)
        if save_image:
            png_path = (os.path.join(frame_folder_path,
                                     "{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
            cv2.imwrite(png_path, self.frame)
            if save_chekerboard_overlay:
                png_path = (os.path.join(frame_folder_path,
                                         "checkerboard_{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
                overlay = self.frame.copy()
                cv2.drawChessboardCorners(overlay, self.current_board_dims, self.current_image_points, True)
                cv2.imwrite(png_path, overlay)
        self.usable_frames[i_frame] = len(self.image_points)
        self.image_points.append(self.current_image_points)
Project: prototype    Author: chutsu
def draw_chessboard_corners(image):
    # Find the chess board corners
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray_image, (9, 6), None)

    # Refine and draw the corners only if the full board was found;
    # otherwise return the input image unchanged
    img = image
    if ret is True:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                    30,
                    0.001)
        corners2 = cv2.cornerSubPix(gray_image,
                                    corners,
                                    (11, 11),
                                    (-1, -1),
                                    criteria)
        img = cv2.drawChessboardCorners(image,
                                        (9, 6),
                                        corners2,
                                        ret)

    return img
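A minimal, hypothetical way to call the helper above on a single webcam frame (the device index and window name are illustrative):

import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
cap.release()
if ok:
    annotated = draw_chessboard_corners(frame)
    cv2.imshow('chessboard corners', annotated)
    cv2.waitKey(0)
    cv2.destroyAllWindows()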
Project: BAR4Py    Author: bxtkezhan
def calculateCorners(self, gray, points=None):
        '''
        gray is OpenCV gray image,
        points is Marker.points
        >>> marker.calculateCorners(gray)
        >>> print(marker.corners)
        '''
        if points is None: points = self.points
        if points is None: raise TypeError('calculateCorners need a points value')
        '''
        rotations = 0 -> 0,1,2,3
        rotations = 1 -> 3,0,1,2
        rotations = 2 -> 2,3,0,1
        rotations = 3 -> 1,2,3,0
        => A: 1,0,3,2; B: 0,3,2,1; C: 2,1,0,3; D: 3,2,1,0
        '''
        i = self.rotations
        A = (1,0,3,2)[i]; B = (0,3,2,1)[i]; C = (2,1,0,3)[i]; D = (3,2,1,0)[i]
        corners = np.float32([points[A], points[B], points[C], points[D]])
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        self.corners = cv2.cornerSubPix(gray, corners, (5,5), (-1,-1), criteria)
Project: ArkwoodAR    Author: rdmilligan
def get_vectors(image, points, mtx, dist):

    # order points
    points = _order_points(points)

    # set up criteria, image, points and axis
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imgp = np.array(points, dtype='float32')

    objp = np.array([[0.,0.,0.],[1.,0.,0.],
                        [1.,1.,0.],[0.,1.,0.]], dtype='float32')  

    # calculate rotation and translation vectors
    cv2.cornerSubPix(gray,imgp,(11,11),(-1,-1),criteria)
    rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)

    return rvecs, tvecs
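A hedged usage sketch for get_vectors(): the returned rotation and translation vectors can be passed to cv2.projectPoints to draw a simple axis over the marker, much like the Kinect example earlier. Here image, points, mtx and dist are assumed to come from the caller.

import numpy as np
import cv2

axis = np.float32([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, -1]])  # origin + X, Y, Z
rvecs, tvecs = get_vectors(image, points, mtx, dist)
imgpts, _ = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
imgpts = np.int32(imgpts).reshape(-1, 2)
for end_point in imgpts[1:]:
    cv2.line(image, tuple(imgpts[0]), tuple(end_point), (0, 255, 0), 2)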
Project: pybot    Author: spillai
def subpixel_pts(self, im, pts): 
        """Perform subpixel refinement"""
        term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
        cv2.cornerSubPix(im, pts, (10, 10), (-1, -1), term)
        return
Project: camera_calibration_frontend    Author: groundmelon
def downsample_and_detect(self, img):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
        """
        # Scale the input image down to ~VGA size
        height = img.shape[0]
        width = img.shape[1]
        scale = math.sqrt( (width*height) / (640.*480.) )
        if scale > 1.0:
            scrib = cv2.resize(img, (int(width / scale), int(height / scale)))
        else:
            scrib = img
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.shape[1]
        y_scale = float(height) / scrib.shape[0]

        if self.pattern == Patterns.Chessboard:
            # Detect checkerboard
            (ok, downsampled_corners, board) = self.get_corners(scrib, refine = True)

            # Scale corners back to full size image
            corners = None
            if ok:
                if scale > 1.0:
                    # Refine up-scaled corners in the original full-res image
                    # TODO Does this really make a difference in practice?
                    corners_unrefined = downsampled_corners.copy()
                    corners_unrefined[:, :, 0] *= x_scale
                    corners_unrefined[:, :, 1] *= y_scale
                    radius = int(math.ceil(scale))
                    if len(img.shape) == 3 and img.shape[2] == 3:
                        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    else:
                        mono = img
                    cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1),
                                                  ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))
                    corners = corners_unrefined
                else:
                    corners = downsampled_corners
        else:
            # Circle grid detection is fast even on large images
            (ok, corners, board) = self.get_corners(img)
            # Scale corners to downsampled image for display
            downsampled_corners = None
            if ok:
                if scale > 1.0:
                    downsampled_corners = corners.copy()
                    downsampled_corners[:,:,0] /= x_scale
                    downsampled_corners[:,:,1] /= y_scale
                else:
                    downsampled_corners = corners

        return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
Project: AR-BXT-AR4Python    Author: GeekLiB
def getP(self, dst):
        """
        dst: the four corner points of the detected target in the scene image

        return self.MTX, self.DIST, self.RVEC, self.TVEC:
        camera matrix, distortion coefficients, rotation vector, translation vector

        """
        if self.SceneImage is None:
            return None

        corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
        gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
        objp = np.zeros((2*2,3), np.float32)
        objp[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)

        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)

        if self.PTimes < self.PCount or self.PCount == 0:
            # Arrays to store object points and image points from all the images.
            objpoints = self.OBJPoints # 3d point in real world space
            imgpoints = self.IMGPoints # 2d points in image plane.

            if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
                objpoints.append(objp)
                imgpoints.append(corners2)

            # Find mtx, dist, rvecs, tvecs
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
            if not ret:
                self.PTimes += 1
                return None
            self.OBJPoints = objpoints
            self.IMGPoints = imgpoints
            self.MTX = mtx
            self.DIST = dist
            self.RVEC = rvecs[0]
            self.TVEC = tvecs[0]
        else:
            # Find the rotation and translation vectors.
            _, rvec, tvec, _= cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
            self.RVEC = rvec
            self.TVEC = tvec
        self.PTimes += 1

        return self.MTX,self.DIST,self.RVEC,self.TVEC
Project: diy_driverless_car_ROS    Author: wilselby
def camera_cal(self, image):

        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        nx = 8
        ny = 6

        dst = np.copy(image) 

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
        objp = np.zeros((ny * nx, 3), np.float32)
        objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)

        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d points in real world space
        imgpoints = [] # 2d points in image plane.

        # Search for chessboard corners
        grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        #ret_thresh,  mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)

        ret, corners = cv2.findChessboardCorners(image, (nx, ny), None)  #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))        

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)           
            cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners)
            self.calibrated = True
            print ("FOUND!")

            #Draw and display the corners
            cv2.drawChessboardCorners(image, (nx, ny), corners, ret)  

            # Do camera calibration given object points and image points
            ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)        

            # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
            dist_pickle = {}
            dist_pickle["mtx"] = self.mtx
            dist_pickle["dist"] = self.dist
            dist_pickle['objpoints'] = objpoints
            dist_pickle['imgpoints'] = imgpoints
            pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )

         #else:
             #print("Searching...")

        return image
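A hedged follow-up sketch: once camera_cal() has written the pickle above, the stored matrix and distortion coefficients can be reloaded and applied with cv2.undistort (the path is the one hard-coded in the method; image is assumed to be a new frame to correct).

import pickle
import cv2

with open("/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "rb") as f:
    calib = pickle.load(f)

undistorted = cv2.undistort(image, calib["mtx"], calib["dist"], None, calib["mtx"])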
Project: perception    Author: BerkeleyAutomation
def find_chessboard(self, sx=6, sy=9):
        """Finds the corners of an sx X sy chessboard in the image.

        Parameters
        ----------
        sx : int
            Number of chessboard corners in x-direction.
        sy : int
            Number of chessboard corners in y-direction.

        Returns
        -------
        :obj:`list` of :obj:`numpy.ndarray`
            A list containing the 2D points of the corners of the detected
            chessboard, or None if no chessboard found.
        """
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS +
                    cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (5,8,0)
        objp = np.zeros((sx * sy, 3), np.float32)
        objp[:, :2] = np.mgrid[0:sx, 0:sy].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.

        # create images
        img = self.data.astype(np.uint8)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (sx, sy), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners)

            if corners is not None:
                return corners.squeeze()
        return None