Python cv2 module: TERM_CRITERIA_EPS example source code

The following code examples, extracted from open-source Python projects, illustrate how to use cv2.TERM_CRITERIA_EPS.

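Every snippet below builds an OpenCV termination-criteria value, which is simply a 3-tuple of (type flags, max iterations, epsilon). A minimal sketch of the pattern (the synthetic image and corner guess here are made up for illustration, not taken from any project below):

import cv2
import numpy as np

# TERM_CRITERIA_EPS stops when the refined value moves by less than epsilon;
# TERM_CRITERIA_MAX_ITER (alias: TERM_CRITERIA_COUNT) caps the iterations.
# These are distinct bit flags, so combining them with + or | is equivalent.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Toy usage: refine a rough corner estimate on a synthetic checker pattern.
img = np.zeros((64, 64), np.uint8)
img[:32, :32] = 255
img[32:, 32:] = 255
corner = np.array([[[30.0, 30.0]]], np.float32)  # rough guess near (31.5, 31.5)
cv2.cornerSubPix(img, corner, (5, 5), (-1, -1), criteria)
print(corner)  # refined in place toward the true corner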
Project: pycalibrate    Author: reconstruct-on-the-fly    | project source | file source
def find_points(images):
    # PATTERN_SIZE, DEBUG and DISPLAY_SCALE are module-level constants in
    # pycalibrate (e.g. PATTERN_SIZE = (9, 6)); OpenCV is imported as `cv`,
    # presumably via `import cv2 as cv`.
    obj_points = []
    img_points = []

    # Assumed object points relation
    a_object_point = np.zeros((PATTERN_SIZE[1] * PATTERN_SIZE[0], 3),
                              np.float32)
    a_object_point[:, :2] = np.mgrid[0:PATTERN_SIZE[0],
                                     0:PATTERN_SIZE[1]].T.reshape(-1, 2)

    # Termination criteria for sub pixel corners refinement
    stop_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER,
                     30, 0.001)

    print('Finding points ', end='')
    debug_images = []
    for (image, color_image) in images:
        found, corners = cv.findChessboardCorners(image, PATTERN_SIZE, None)
        if found:
            obj_points.append(a_object_point)
            cv.cornerSubPix(image, corners, (11, 11), (-1, -1), stop_criteria)
            img_points.append(corners)

            print('.', end='')
        else:
            print('-', end='')

        if DEBUG:
            cv.drawChessboardCorners(color_image, PATTERN_SIZE, corners, found)
            debug_images.append(color_image)

        sys.stdout.flush()

    if DEBUG:
        display_images(debug_images, DISPLAY_SCALE)

    print('\nWas able to find points in %s images' % len(img_points))
    return obj_points, img_points


# images is a list of tuples: (gray_image, color_image)
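# A hypothetical loader (not part of pycalibrate) showing how that list of
# (gray_image, color_image) tuples might be assembled:
def load_image_pairs(paths):
    import cv2
    pairs = []
    for path in paths:
        color = cv2.imread(path)
        gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
        pairs.append((gray, color))
    return pairs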
Project: camera_calibration_frontend    Author: groundmelon    | project source | file source
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        # Corners are returned row-major with n_cols entries per row, so the
        # flat index of (row, col) is row * n_cols + col.
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row * board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row * board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1),
                         (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))

    return (ok, corners)
Project: imgProcessor    Author: radjkarl    | project source | file source
def _findChessboard(self):
        # Find the chess board corners
        flags = cv2.CALIB_CB_FAST_CHECK
        if self._detect_sensible:
            flags = (cv2.CALIB_CB_FAST_CHECK |
                     cv2.CALIB_CB_ADAPTIVE_THRESH |
                     cv2.CALIB_CB_FILTER_QUADS |
                     cv2.CALIB_CB_NORMALIZE_IMAGE)

        (didFindCorners, corners) = cv2.findChessboardCorners(
            self.img, self.opts['size'], flags=flags
        )
        if didFindCorners:
            # further refine corners; corners are updated in place
            cv2.cornerSubPix(self.img, corners, (11, 11), (-1, -1),
                             # termination criteria for corner estimation for
                             # chessboard method
                             (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                              30, 0.001)
                             )  # returns None
        return didFindCorners, corners
Project: pybot    Author: spillai    | project source | file source
def sparse_optical_flow(im1, im2, pts, fb_threshold=-1, 
                        window_size=15, max_level=2, 
                        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)): 

    # Forward flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(im1, im2, pts, None, 
                                           winSize=(window_size, window_size), 
                                           maxLevel=max_level, criteria=criteria )

    # Backward flow
    if fb_threshold > 0:     
        p0r, st0, err = cv2.calcOpticalFlowPyrLK(im2, im1, p1, None, 
                                           winSize=(window_size, window_size), 
                                           maxLevel=max_level, criteria=criteria)
        p0r[st0 == 0] = np.nan

        # Keep only points whose backward-tracked position agrees with the
        # original points (forward-backward consistency check)
        fb_good = (np.fabs(p0r - pts) < fb_threshold).all(axis=1)

        p1[~fb_good] = np.nan
        st = np.bitwise_and(st, st0)
        err[~fb_good] = np.nan

    return p1, st, err
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | project source | file source
def k(screen):
        Z = screen.reshape((-1,3))

        # convert to np.float32
        Z = np.float32(Z)

        # define criteria, number of clusters(K) and apply kmeans()
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2
        ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

        # Now convert back into uint8, and make original image
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((screen.shape))
        return res2
Project: Machine-Learning    Author: Jegathis    | project source | file source
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
Project: prototype    Author: chutsu    | project source | file source
def draw_chessboard_corners(image):
    # Find the chess board corners
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray_image, (9, 6), None)

    # Draw the refined corners (return the input image unchanged if not found)
    img = image
    if ret is True:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                    30,
                    0.001)
        corners2 = cv2.cornerSubPix(gray_image,
                                    corners,
                                    (11, 11),
                                    (-1, -1),
                                    criteria)
        img = cv2.drawChessboardCorners(image,
                                        (9, 6),
                                        corners2,
                                        ret)

    return img
Project: prototype    Author: chutsu    | project source | file source
def feature_tracking(image_ref, image_cur, px_ref):
    """Feature Tracking

    Parameters
    ----------
    image_ref : np.array
        Reference image
    image_cur : np.array
        Current image
    px_ref :
        Reference pixels

    Returns
    -------
    (kp1, kp2) : (list of Keypoints, list of Keypoints)

    """
    # Setup
    win_size = (21, 21)
    max_level = 3
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01)

    # Perform LK-tracking
    lk_params = {"winSize": win_size,
                 "maxLevel": max_level,
                 "criteria": criteria}
    kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref,
                                            image_cur,
                                            px_ref,
                                            None,
                                            **lk_params)

    # Post-process
    st = st.reshape(st.shape[0])
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]

    return kp1, kp2
Project: BAR4Py    Author: bxtkezhan    | project source | file source
def calculateCorners(self, gray, points=None):
        '''
        gray is OpenCV gray image,
        points is Marker.points
        >>> marker.calculateCorners(gray)
        >>> print(marker.corners)
        '''
        if points is None: points = self.points
        if points is None: raise TypeError('calculateCorners needs a points value')
        '''
        rotations = 0 -> 0,1,2,3
        rotations = 1 -> 3,0,1,2
        rotations = 2 -> 2,3,0,1
        rotations = 3 -> 1,2,3,0
        => A: 1,0,3,2; B: 0,3,2,1; C: 2,1,0,3; D: 3,2,1,0
        '''
        i = self.rotations
        A = (1,0,3,2)[i]; B = (0,3,2,1)[i]; C = (2,1,0,3)[i]; D = (3,2,1,0)[i]
        corners = np.float32([points[A], points[B], points[C], points[D]])
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        self.corners = cv2.cornerSubPix(gray, corners, (5,5), (-1,-1), criteria)
Project: kaggle-dstl    Author: lopuhin    | project source | file source
def _get_alignment(im_ref, im_to_align, key):
    if key is not None:
        cached_path = Path('align_cache').joinpath('{}.alignment'.format(key))
        if cached_path.exists():
            with cached_path.open('rb') as f:
                return pickle.load(f)
    logger.info('Getting alignment for {}'.format(key))
    warp_mode = cv2.MOTION_TRANSLATION
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    criteria = (
        cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000,  1e-8)
    cc, warp_matrix = cv2.findTransformECC(
        im_ref, im_to_align, warp_matrix, warp_mode, criteria)
    if key is not None:
        with cached_path.open('wb') as f:
            pickle.dump((cc, warp_matrix), f)
    logger.info('Got alignment for {} with cc {:.3f}: {}'
                .format(key, cc, str(warp_matrix).replace('\n', '')))
    return cc, warp_matrix
Project: action-recoginze    Author: WeiruZ    | project source | file source
def k_means(self, a_frame, K=2):
        """
        :param a_frame:
        :param K:
        :return: np.ndarray draw the frame use K color's centers
        """
        Z = a_frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        # bestLabels must be passed as None (3rd positional arg) in OpenCV 3+
        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((a_frame.shape))

        return res2
Project: action-recoginze    Author: WeiruZ    | project source | file source
def cluster(frame_matrix):
    new_frame_matrix = []
    i = 0
    for frame in frame_matrix:
        print "reader {} frame".format(i)
        i += 1
        Z = frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2

        # bestLabels must be passed as None (3rd positional arg) in OpenCV 3+
        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((frame.shape))

        new_frame_matrix.append(res2)
        cv2.imshow('res2', res2)
        cv2.waitKey(1)
    cv2.destroyAllWindows()
    return new_frame_matrix
Project: object-classification    Author: HenrYxZ    | project source | file source
def gen_codebook(dataset, descriptors, k = 64):
    """
    Generate a k codebook for the dataset.

    Args:
        dataset (Dataset object): An object that stores information about the dataset.
        descriptors (list of integer arrays): The descriptors for every class.
        k (integer): The number of clusters that are going to be calculated.

    Returns:
        list of integer arrays: The k codewords for the dataset.
    """
    iterations = 10
    epsilon = 1.0
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, iterations, epsilon)
    # bestLabels must be passed as None (3rd positional arg) in OpenCV 3+
    compactness, labels, centers = cv2.kmeans(descriptors, k, None, criteria, iterations, cv2.KMEANS_RANDOM_CENTERS)
    return centers
Project: Recognition    Author: thautwarm    | project source | file source
def deal(self,frame):
        frame=frame.copy()
        track_window=self.track_window
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        roi_hist=self.roi_hist 
        dst = cv2.calcBackProject([frame],[0],roi_hist,[0,180],1)
        if self.m=='m':
            ret, track_window_r = cv2.meanShift(dst, track_window, term_crit)
            x,y,w,h = track_window_r
            img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        elif self.m=='c':
            ret, track_window_r = cv2.CamShift(dst, track_window, term_crit)


            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            img2 = cv2.polylines(frame,[pts],True, 255,2)

        center1=(track_window[0]+track_window[2]//2,track_window[1]+track_window[3]//2)
        center2=(track_window_r[0]+track_window_r[2]//2,track_window_r[1]+track_window_r[3]//2)
        img2 = cv2.line(img2,center1,center2,color=0)
        rectsNew=track_window_r
#        x,y,w,h = track_window
#        img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        cv2.imshow('img2',img2)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
        return rectsNew
Project: ArkwoodAR    Author: rdmilligan    | project source | file source
def get_vectors(image, points, mtx, dist):

    # order points
    points = _order_points(points)

    # set up criteria, image, points and axis
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imgp = np.array(points, dtype='float32')

    objp = np.array([[0.,0.,0.],[1.,0.,0.],
                        [1.,1.,0.],[0.,1.,0.]], dtype='float32')  

    # calculate rotation and translation vectors
    cv2.cornerSubPix(gray, imgp, (11, 11), (-1, -1), criteria)
    # NOTE: OpenCV 2.4 solvePnPRansac returns (rvec, tvec, inliers); OpenCV 3+
    # returns (retval, rvec, tvec, inliers), so this unpacking assumes the
    # 2.4-style three-value return.
    rvecs, tvecs, _ = cv2.solvePnPRansac(objp, imgp, mtx, dist)

    return rvecs, tvecs
Project: Camera2TCP    Author: kevinkit    | project source | file source
    def affine(self):
        # NOTE: despite the method name, this estimates a full homography
        warp_mode = cv2.MOTION_HOMOGRAPHY
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000,  1e-10)
        warp_matrix = np.eye(3, 3, dtype=np.float32)

        while True:
            try:
                if self.ret[0] is not None and self.client[0].img is not None:
                    master_cam_grey = cv2.cvtColor(self.client[0].img, cv2.COLOR_BGR2GRAY)
                else:
                    print("Image was none!")
                for i in range(1,self.cams):
                    if self.ret[i] is not None:
                        print("Trying to calibrate")
                        slave_cam = cv2.cvtColor(self.client[i].img, cv2.COLOR_BGR2GRAY)
                        try:
                            (cc, warp_matrix) = cv2.findTransformECC(self.get_gradient(master_cam_grey), self.get_gradient(slave_cam), warp_matrix, warp_mode, criteria)
                        except Exception as e:
                            print(e)
                        print(warp_matrix)
                    else:
                        print("Image was none")
                        ti.sleep(5)
            except:
                ti.sleep(1)
Project: pyku    Author: dubvulture    | project source | file source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pyku    Author: dubvulture    | project source | file source
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: whether the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° degree for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pybot    Author: spillai    | project source | file source
def subpixel_pts(self, im, pts): 
        """Perform subpixel refinement"""
        term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
        cv2.cornerSubPix(im, pts, (10, 10), (-1, -1), term)
        return
Project: camera_calibration_frontend    Author: groundmelon    | project source | file source
def cal_fromcorners(self, good):
        # Perform monocular calibrations
        lcorners = [(l, b) for (l, r, b) in good]
        rcorners = [(r, b) for (l, r, b) in good]
        self.l.cal_fromcorners(lcorners)
        self.r.cal_fromcorners(rcorners)

        lipts = [ l for (l, _, _) in good ]
        ripts = [ r for (_, r, _) in good ]
        boards = [ b for (_, _, b) in good ]

        opts = self.mk_object_points(boards, True)

        flags = cv2.CALIB_FIX_INTRINSIC

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        if LooseVersion(cv2.__version__).version[0] == 2:
            cv2.stereoCalibrate(opts, lipts, ripts, self.size,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)
        else:
            cv2.stereoCalibrate(opts, lipts, ripts,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.size,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)

        self.set_alpha(0.0)
Project: self-driving    Author: BoltzmannBrain    | project source | file source
def __init__(self, videoSource, featurePtMask=None, verbosity=0):
    # cap the length of optical flow tracks
    self.maxTrackLength = 10

    # detect feature points in intervals of frames; adds robustness for
    # when feature points disappear.
    self.detectionInterval = 5

    # Params for Shi-Tomasi corner (feature point) detection
    self.featureParams = dict(
        maxCorners=500,
        qualityLevel=0.3,
        minDistance=7,
        blockSize=7
    )
    # Params for Lucas-Kanade optical flow
    self.lkParams = dict(
        winSize=(15, 15),
        maxLevel=2,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
    )
    # # Alternatively use a fast feature detector
    # self.fast = cv2.FastFeatureDetector_create(500)

    self.verbosity = verbosity

    (self.videoStream,
     self.width,
     self.height,
     self.featurePtMask) = self._initializeCamera(videoSource)
Project: ATX    Author: NetEaseGame    | project source | file source
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,  10, 1)


    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w // 2, h // 2))  # dsize is (width, height)
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview')
Project: ATX    Author: NetEaseGame    | project source | file source
def test_kmeans(img):
    ## K-means clustering
    z = img.reshape((-1, 3))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, 20, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('preview', res2)
    cv2.waitKey()
Project: car-detection    Author: mmetcalfe    | project source | file source
def find_label_clusters(kitti_base, kittiLabels, shape, num_clusters, descriptors=None):
    if descriptors is None:
        progressbar = ProgressBar('Computing descriptors', max=len(kittiLabels))
        descriptors = []
        for label in kittiLabels:
            progressbar.next()
            img = getCroppedSampleFromLabel(kitti_base, label)
            # img = cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_AREA)
            img = resizeSample(img, shape, label)
            hist = get_hog(img)
            descriptors.append(hist)
        progressbar.finish()
    else:
        print 'find_label_clusters,', 'Using supplied descriptors.'
        print len(kittiLabels), len(descriptors)
        assert(len(kittiLabels) == len(descriptors))

    # X = np.random.randint(25,50,(25,2))
    # Y = np.random.randint(60,85,(25,2))
    # Z = np.vstack((X,Y))

    # convert to np.float32
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_label_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(kittiLabels, label)
    return clusters
    # # Plot the data
    # from matplotlib import pyplot as plt
    # plt.scatter(A[:,0],A[:,1])
    # plt.scatter(B[:,0],B[:,1],c = 'r')
    # plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
    # plt.xlabel('Height'),plt.ylabel('Weight')
    # plt.show()
Project: car-detection    Author: mmetcalfe    | project source | file source
def find_sample_clusters(pos_reg_generator, window_dims, hog, num_clusters):
    regions = list(pos_reg_generator)
    descriptors = trainhog.compute_hog_descriptors(hog, regions, window_dims, 1)

    # convert to np.float32
    descriptors = [rd.descriptor for rd in descriptors]
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print 'find_sample_clusters,', 'kmeans:', K
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret,label,center=cv2.kmeans(Z,K,None,criteria,attempts,cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print 'ret:', ret
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(regions, label)
    return clusters
Project: car-detection    Author: mmetcalfe    | project source | file source
def train_svm(svm_save_path, descriptors, labels):
    # train_data = convert_to_ml(descriptors)
    train_data = np.array(descriptors)
    responses = np.array(labels, dtype=np.int32)

    print "Start training..."
    svm = cv2.ml.SVM_create()
    # Default values to train SVM
    svm.setCoef0(0.0)
    svm.setDegree(3)
    # svm.setTermCriteria(TermCriteria(cv2.TERMCRIT_ITER + cv2.TERMCRIT_EPS, 1000, 1e-3))
    svm.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 1000, 1e-3))
    svm.setGamma(0)
    svm.setKernel(cv2.ml.SVM_LINEAR)
    svm.setNu(0.5)
    svm.setP(0.1) # for EPSILON_SVR, epsilon in loss function?
    svm.setC(0.01) # From paper, soft classifier
    svm.setType(cv2.ml.SVM_EPS_SVR) # C_SVC; # EPSILON_SVR; # may be also NU_SVR; # do regression task
    svm.train(train_data, cv2.ml.ROW_SAMPLE, responses)
    print "...[done]"

    svm.save(svm_save_path)

# def test_classifier(svm_file_path, window_dims):
#     #  Set the trained svm to my_hog
#     hog_detector = get_svm_detector(svm_file_path)
#     hog = get_hog_object(window_dims)
#     hog.setSVMDetector(hog_detector)
#
#     locations = hog.detectMultiScale(img)
Project: PaintingToArtists    Author: achintyagopal    | project source | file source
def createTrainingInstances(self, images):
        instances = []
        img_descriptors = []
        master_descriptors = []
        cv2.ocl.setUseOpenCL(False)
        orb = cv2.ORB_create()
        for img, label in images:
            print img
            img = read_color_image(img)
            keypoints = orb.detect(img, None)
            keypoints, descriptors = orb.compute(img, keypoints)
            if descriptors is None:
                descriptors = []

            img_descriptors.append(descriptors)
            for i in descriptors:
                master_descriptors.append(i)


        master_descriptors = np.float32(master_descriptors)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        labels = labels.ravel()

        count = 0
        img_num = 0
        for img, label in images:
            histogram = np.zeros(self.center_num)
            feature_vector = img_descriptors[img_num]
            for f in xrange(len(feature_vector)):
                index = count + f
                histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
            count += len(feature_vector)
            pairing = Instance(histogram, label)
            instances.append(pairing)

        self.training_instances = instances
        self.centers = centers
Project: PaintingToArtists    Author: achintyagopal    | project source | file source
def local_bow_train(self, images):
        instances = []
        img_descriptors = []
        master_descriptors = []
        cv2.ocl.setUseOpenCL(False)
        orb = cv2.ORB_create()
        for img, label in images:
            print img
            img = read_color_image(img)
            keypoints = orb.detect(img, None)
            keypoints, descriptors = orb.compute(img, keypoints)
            if descriptors is None:
                descriptors = []

            img_descriptors.append(descriptors)
            for i in descriptors:
                master_descriptors.append(i)


        master_descriptors = np.float32(master_descriptors)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        labels = labels.ravel()

        count = 0
        img_num = 0
        for img, label in images:
            histogram = np.zeros(self.center_num)
            feature_vector = img_descriptors[img_num]
            for f in xrange(len(feature_vector)):
                index = count + f
                histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
            count += len(feature_vector)
            pairing = Instance(histogram, label)
            instances.append(pairing)

        self.training_instances = instances
        self.centers = centers
Project: esys-pbi    Author: fsxfreak    | project source | file source
def update(self,frame,events):
        img = frame.img
        img_shape = img.shape[:-1][::-1] # width,height

        succeeding_frame = frame.index-self.prev_frame_idx == 1
        same_frame = frame.index == self.prev_frame_idx
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        #vars for calcOpticalFlowPyrLK
        lk_params = dict( winSize  = (90, 90),
                  maxLevel = 3,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03))

        updated_past_gaze = []

        # let's update past gaze using optical flow: this is like sticking the gaze points onto the pixels of the image.
        if self.past_gaze_positions and succeeding_frame:
            past_screen_gaze = np.array([denormalize(ng['norm_pos'] ,img_shape,flip_y=True) for ng in self.past_gaze_positions],dtype=np.float32)
            new_pts, status, err = cv2.calcOpticalFlowPyrLK(self.prev_gray,gray_img,past_screen_gaze,None,minEigThreshold=0.005,**lk_params)
            for gaze,new_gaze_pt,s,e in zip(self.past_gaze_positions,new_pts,status,err):
                if s:
                    # print "norm,updated",gaze['norm_gaze'], normalize(new_gaze_pt,img_shape[:-1],flip_y=True)
                    gaze['norm_pos'] = normalize(new_gaze_pt,img_shape,flip_y=True)
                    updated_past_gaze.append(gaze)
                    # logger.debug("updated gaze")

                else:
                    # logger.debug("dropping gaze")
                    # Since we will replace self.past_gaze_positions later,
                    # not appending to updated_past_gaze is like deleting this data point.
                    pass
        else:
            # we must be seeking, do not try to do optical flow, or pausing: see below.
            pass

        if same_frame:
            # paused
            # re-use last result
            events['gaze_positions'][:] = self.past_gaze_positions[:]
        else:
            # trim gaze that is too old
            if events['gaze_positions']:
                now = events['gaze_positions'][0]['timestamp']
                cutoff = now-self.timeframe
                updated_past_gaze = [g for g in updated_past_gaze if g['timestamp']>cutoff]

            #inject the scan path gaze points into recent_gaze_positions
            events['gaze_positions'][:] = updated_past_gaze + events['gaze_positions']
            events['gaze_positions'].sort(key=lambda x: x['timestamp']) #this may be redundant...

        #update info for next frame.
        self.prev_gray = gray_img
        self.prev_frame_idx = frame.index
        self.past_gaze_positions = events['gaze_positions']
Project: camera_calibration_frontend    Author: groundmelon    | project source | file source
def downsample_and_detect(self, img):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
        """
        # Scale the input image down to ~VGA size
        height = img.shape[0]
        width = img.shape[1]
        scale = math.sqrt( (width*height) / (640.*480.) )
        if scale > 1.0:
            scrib = cv2.resize(img, (int(width / scale), int(height / scale)))
        else:
            scrib = img
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.shape[1]
        y_scale = float(height) / scrib.shape[0]

        if self.pattern == Patterns.Chessboard:
            # Detect checkerboard
            (ok, downsampled_corners, board) = self.get_corners(scrib, refine = True)

            # Scale corners back to full size image
            corners = None
            if ok:
                if scale > 1.0:
                    # Refine up-scaled corners in the original full-res image
                    # TODO Does this really make a difference in practice?
                    corners_unrefined = downsampled_corners.copy()
                    corners_unrefined[:, :, 0] *= x_scale
                    corners_unrefined[:, :, 1] *= y_scale
                    radius = int(math.ceil(scale))
                    if len(img.shape) == 3 and img.shape[2] == 3:
                        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    else:
                        mono = img
                    cv2.cornerSubPix(mono, corners_unrefined, (radius, radius), (-1, -1),
                                     (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
                    corners = corners_unrefined
                else:
                    corners = downsampled_corners
        else:
            # Circle grid detection is fast even on large images
            (ok, corners, board) = self.get_corners(img)
            # Scale corners to downsampled image for display
            downsampled_corners = None
            if ok:
                if scale > 1.0:
                    downsampled_corners = corners.copy()
                    downsampled_corners[:,:,0] /= x_scale
                    downsampled_corners[:,:,1] /= y_scale
                else:
                    downsampled_corners = corners

        return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
Project: AR-BXT-AR4Python    Author: GeekLiB    | project source | file source
def getP(self, dst):
        """
        dst: ??????

        return self.MTX,self.DIST,self.RVEC,self.TVEC:
        ?? ?????????????????

        """
        if self.SceneImage is None:
            return None

        corners = np.float32([dst[1], dst[0], dst[2], dst[3]])
        gray = cv2.cvtColor(self.SceneImage, cv2.COLOR_BGR2GRAY)
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (0,1,0), (1,1,0)
        objp = np.zeros((2*2,3), np.float32)
        objp[:,:2] = np.mgrid[0:2,0:2].T.reshape(-1,2)

        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)

        if self.PTimes < self.PCount or self.PCount == 0:
            # Arrays to store object points and image points from all the images.
            objpoints = self.OBJPoints # 3d point in real world space
            imgpoints = self.IMGPoints # 2d points in image plane.

            if len(imgpoints) == 0 or np.sum(np.abs(imgpoints[-1] - corners2)) != 0:
                objpoints.append(objp)
                imgpoints.append(corners2)

            # Find mtx, dist, rvecs, tvecs
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
            if not ret:
                self.PTimes += 1
                return None
            self.OBJPoints = objpoints
            self.IMGPoints = imgpoints
            self.MTX = mtx
            self.DIST = dist
            self.RVEC = rvecs[0]
            self.TVEC = tvecs[0]
        else:
            # Find the rotation and translation vectors.
            _, rvec, tvec, _= cv2.solvePnPRansac(objp, corners2, self.MTX, self.DIST)
            self.RVEC = rvec
            self.TVEC = tvec
        self.PTimes += 1

        return self.MTX,self.DIST,self.RVEC,self.TVEC
Project: cvcalib    Author: Algomorph    | project source | file source
def calibrate_intrinsics(camera, image_points,
                         object_points,
                         use_rational_model=True,
                         use_tangential=False,
                         use_thin_prism=False,
                         fix_radial=False,
                         fix_thin_prism=False,
                         max_iterations=30,
                         use_existing_guess=False,
                         test=False):
    flags = 0
    if test:
        flags = flags | cv2.CALIB_USE_INTRINSIC_GUESS
        # fix everything
        flags = flags | cv2.CALIB_FIX_PRINCIPAL_POINT
        flags = flags | cv2.CALIB_FIX_ASPECT_RATIO
        flags = flags | cv2.CALIB_FIX_FOCAL_LENGTH
        # apparently, we can't fix the tangential distortion. What the hell? Zero it out.
        flags = flags | cv2.CALIB_ZERO_TANGENT_DIST
        flags = fix_radial_flags(flags)
        flags = flags | cv2.CALIB_FIX_S1_S2_S3_S4
        criteria = (cv2.TERM_CRITERIA_MAX_ITER, 1, 0)
    else:
        if fix_radial:
            flags = fix_radial_flags(flags)
        if fix_thin_prism:
            flags = flags | cv2.CALIB_FIX_S1_S2_S3_S4
        criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, max_iterations,
                    2.2204460492503131e-16)
    if use_existing_guess:
        flags = flags | cv2.CALIB_USE_INTRINSIC_GUESS
    if not use_tangential:
        flags = flags | cv2.CALIB_ZERO_TANGENT_DIST
    if use_rational_model:
        flags = flags | cv2.CALIB_RATIONAL_MODEL
        if len(camera.intrinsics.distortion_coeffs) < 8:
            camera.intrinsics.distortion_coeffs.resize((8,))
    if use_thin_prism:
        flags = flags | cv2.CALIB_THIN_PRISM_MODEL
        if len(camera.intrinsics.distortion_coeffs) != 12:
            camera.intrinsics.distortion_coeffs = np.resize(camera.intrinsics.distortion_coeffs, (12,))
    return __calibrate_intrinsics(camera, image_points, object_points, flags, criteria)
Project: prototype    Author: chutsu    | project source | file source
def track_features(self, image_ref, image_cur):
        """Track Features

        Parameters
        ----------
        image_ref : np.array
            Reference image
        image_cur : np.array
            Current image

        """
        # Re-detect new feature points if too few
        if len(self.tracks_tracking) < self.min_nb_features:
            self.tracks_tracking = []  # reset alive feature tracks
            self.detect(image_ref)

        # LK parameters
        win_size = (21, 21)
        max_level = 2
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.03)

        # Convert reference keypoints to numpy array
        self.kp_ref = self.last_keypoints()

        # Perform LK tracking
        lk_params = {"winSize": win_size,
                     "maxLevel": max_level,
                     "criteria": criteria}
        self.kp_cur, statuses, err = cv2.calcOpticalFlowPyrLK(image_ref,
                                                              image_cur,
                                                              self.kp_ref,
                                                              None,
                                                              **lk_params)

        # Filter out bad matches (choose only good keypoints)
        status = statuses.reshape(statuses.shape[0])
        still_alive = []
        for i in range(len(status)):
            if status[i] == 1:
                track_id = self.tracks_tracking[i]
                still_alive.append(track_id)
                kp = KeyPoint(self.kp_cur[i], 0)
                self.tracks[track_id].update(self.frame_id, kp)

        self.tracks_tracking = still_alive
Project: ocular    Author: wolfd    | project source | file source
def __init__(self, image_topic, feature_detector='FAST'):
        super(OpticalFlowMatcher, self).__init__()

        rospy.init_node('optical_flow_matcher')

        self.cv_bridge = CvBridge()

        self.rectified_image_topic = rospy.Subscriber(
            image_topic,
            Image,
            self.new_image_callback
        )

        self.pub_keypoint_motion = rospy.Publisher(
            'keypoint_motion',
            KeypointMotion,
            queue_size=10
        )

        self.feature_params = None

        if feature_detector == 'FAST':
            self.get_features = self.get_features_fast
            # Initiate FAST detector with default values
            self.fast = cv2.FastFeatureDetector_create()

            self.fast.setThreshold(20)

        elif feature_detector == 'GOOD':
            self.get_features = self.get_features_good
            # params for ShiTomasi 'GOOD' corner detection
            self.feature_params = dict(
                maxCorners=200,
                qualityLevel=0.3,
                minDistance=7,
                blockSize=7
            )
        else:
            raise Exception(
                '{} feature detector not implemented'.format(feature_detector)
            )

        # Parameters for lucas kanade optical flow
        self.lk_params = dict(
            winSize=(15, 15),
            maxLevel=2,
            criteria=(
                cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                10,
                0.03
            )
        )

        self.last_frame_gray = None
        self.good_old = None
        self.good_new = None
Project: GradLab    Author: Ajf4163    | project source | file source
def translationalThermalReg(im1,im2):
    import cv2,numpy

    #get dimensions
    s1=im1.shape
    s2=im2.shape

    #check sizes agree as a sanity check for inputs

    if s1!=s2:
        raise TypeError('Array Inputs are of different sizes!')

    #Select motion model in CV (MOTION_AFFINE estimates a full 2x3 affine
    #warp; despite the function name, this is more general than translation)
    warp_model = cv2.MOTION_AFFINE

    #Define 2x3 Warp Matrix
    warp_matrix = numpy.eye(2, 3, dtype=numpy.float32)

    #Number of iterations allowed to converge on solution
    num_it=10000

    #Terminal Threshold
    termTh = 1e-9

    #Define Stopping Criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, num_it,  termTh)

    #Ensure images are of datatype float32 (for compatibility with transformation convergence)
    im1=im1.astype(numpy.float32)
    im2=im2.astype(numpy.float32)

    #Find Ideal Transform given input parameters
    (cc, warp_matrix) = cv2.findTransformECC(im1,im2,warp_matrix, warp_model, criteria)

    #Apply Transform
    aligned = cv2.warpAffine(im2, warp_matrix, (s1[1], s1[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    print('Calculated Affine Warp Matrix:')
    print(warp_matrix)

    return aligned, warp_matrix


#Test Harness for debugging and testing of functions
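# A minimal, hypothetical harness (not from the GradLab project): register a
# synthetically shifted copy of a smooth random image back onto the original.
if __name__ == '__main__':
    import cv2, numpy
    rng = numpy.random.RandomState(0)
    base = cv2.GaussianBlur(rng.rand(128, 128).astype(numpy.float32), (15, 15), 3)
    shifted = numpy.roll(base, 3, axis=1)  # inject a 3 px x-translation
    aligned, warp_matrix = translationalThermalReg(base, shifted)
    # warp_matrix[0, 2] should recover roughly the injected shift
    # (up to the sign convention used by findTransformECC)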
Project: diy_driverless_car_ROS    Author: wilselby    | project source | file source
def camera_cal(self, image):

        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        nx = 8
        ny = 6

        dst = np.copy(image) 

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((ny * nx, 3), np.float32)
        objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)

        # Arrays to store object points and image points from all the images.
        objpoints = [] # 3d points in real world space
        imgpoints = [] # 2d points in image plane.

        # Search for chessboard corners
        grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        #ret_thresh,  mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)

        ret, corners = cv2.findChessboardCorners(image, (nx, ny), None)  #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))        

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)           
            cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners)
            self.calibrated = True
            print ("FOUND!")

            #Draw and display the corners
            cv2.drawChessboardCorners(image, (nx, ny), corners, ret)  

            # Do camera calibration given object points and image points
            ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)        

            # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
            dist_pickle = {}
            dist_pickle["mtx"] = self.mtx
            dist_pickle["dist"] = self.dist
            dist_pickle['objpoints'] = objpoints
            dist_pickle['imgpoints'] = imgpoints
            pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )

        #else:
        #    print("Searching...")

        return image
Project: perception    Author: BerkeleyAutomation    | project source | file source
def find_chessboard(self, sx=6, sy=9):
        """Finds the corners of an sx X sy chessboard in the image.

        Parameters
        ----------
        sx : int
            Number of chessboard corners in x-direction.
        sy : int
            Number of chessboard corners in y-direction.

        Returns
        -------
        :obj:`list` of :obj:`numpy.ndarray`
            A list containing the 2D points of the corners of the detected
            chessboard, or None if no chessboard found.
        """
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS +
                    cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((sx * sy, 3), np.float32)
        objp[:, :2] = np.mgrid[0:sx, 0:sy].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.

        # create images
        img = self.data.astype(np.uint8)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (sx, sy), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners)

            if corners is not None:
                return corners.squeeze()
        return None