Python cv2 module: KeyPoint() usage examples

We have extracted the following 22 code examples from open-source Python projects to illustrate how to use cv2.KeyPoint().

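Before the project examples, a minimal sketch of the constructor itself: a keypoint is built from an (x, y) position plus a size (the diameter of the meaningful neighbourhood), with angle, response, octave and class_id optional.

import cv2

# A keypoint at (x, y) = (10.0, 20.0) with a 5-pixel diameter.
kp = cv2.KeyPoint(10.0, 20.0, 5.0)
print(kp.pt, kp.size, kp.angle)  # (10.0, 20.0) 5.0 -1.0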
Project: OCV_Vehicles_Features    Author: dan-masek    | Project Source | File Source
def load(file_name):
        with open(file_name, "rb") as f:
            data = pickle.load(f)

        keypoints = []
        descriptors = []

        for entry in data:
            point = entry[0]
            size = entry[1]
            angle = entry[2]
            response = entry[3]
            octave = entry[4]
            class_id = entry[5]

            # The underscore keyword names (_size, _angle, ...) match OpenCV
            # builds up to 4.5.2; from roughly 4.5.3 onward the bindings
            # expect size=, angle=, response=, octave= and class_id= instead.
            keypoints.append(cv2.KeyPoint(x=point[0], y=point[1],
                                          _size=size,
                                          _angle=angle,
                                          _response=response,
                                          _octave=octave,
                                          _class_id=class_id))

            descriptors.append(entry[6])

        return KeypointData(keypoints, np.array(descriptors, np.float32))
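cv2.KeyPoint objects are not directly picklable, which is why this project stores plain tuples. A matching save() counterpart (not part of the excerpt; a sketch that mirrors the field order load() expects) might look like:

import pickle

def save(file_name, keypoints, descriptors):
    # Unpack each cv2.KeyPoint into picklable primitives, in the same
    # order that load() reads them back (descriptor last).
    data = [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id, des)
            for kp, des in zip(keypoints, descriptors)]
    with open(file_name, "wb") as f:
        pickle.dump(data, f)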
Project: pybot    Author: spillai    | Project Source | File Source
def detect(self, im, mask=None): 
        tags = self.detector.process(im, return_poses=False)
        kpts = []
        for tag in tags: 
            kpts.extend([cv2.KeyPoint(pt[0], pt[1], 1) for pt in tag.getFeatures()])
        return kpts
Project: pybot    Author: spillai    | Project Source | File Source
def to_kpt(pt, size=1): 
    return cv2.KeyPoint(pt[0], pt[1], size)
Project: pybot    Author: spillai    | Project Source | File Source
def to_kpts(pts, size=1): 
    return [cv2.KeyPoint(pt[0], pt[1], size) for pt in pts]
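Going the other way, from cv2.KeyPoint objects back to a plain coordinate array, is a one-liner, e.g.:

import numpy as np

pts = np.float32([kp.pt for kp in kpts])  # N x 2 array of (x, y) pairs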
Project: pybot    Author: spillai    | Project Source | File Source
def detect(self, im, mask=None): 
        tags = self.detector_.process(im, return_poses=False)
        kpts = []
        for tag in tags: 
            kpts.extend([cv2.KeyPoint(pt[0], pt[1], 1) for pt in tag.getFeatures()])
        return kpts
Project: AlphaLogo    Author: gigaflw    | Project Source | File Source
def draw_keypoints(self, im, keypoints, filename="keypoints.jpg"):
        self._log("drawing keypoints into '%s'..." % filename)
        rows, cols = im.shape

        def to_cv2_kp(kp):
            # kp layout: [<row>, <col>, <ori>, <octave_ind>, <layer_ind>]
            ratio = get_size_ratio_by_octave(kp[3])
            scale = get_scale_by_ind(kp[3], kp[4])
            return cv2.KeyPoint(kp[1] / ratio, kp[0] / ratio, 10, kp[2] / PI * 180)

        kp_for_draw = list(map(to_cv2_kp, keypoints))
        # OpenCV 3+ requires the outImage argument (None lets it allocate one).
        im_kp = cv2.drawKeypoints(im, kp_for_draw, None,
                                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imwrite(filename, im_kp)
Project: prototype    Author: chutsu    | Project Source | File Source
def setUp(self):
        self.image_height = 600
        self.ransac = VerticalRANSAC(self.image_height)

        # Load test images
        data_path = test.TEST_DATA_PATH
        img0 = cv2.imread(os.path.join(data_path, "vo", "0.png"))
        img1 = cv2.imread(os.path.join(data_path, "vo", "1.png"))

        # Detect features
        tracker = FeatureTracker()
        f0 = tracker.detect(img0)
        f1 = tracker.detect(img1)

        # Convert Features to cv2.KeyPoint and descriptors (np.array)
        kps0 = [cv2.KeyPoint(f.pt[0], f.pt[1], f.size) for f in f0]
        des0 = np.array([f.des for f in f0])
        kps1 = [cv2.KeyPoint(f.pt[0], f.pt[1], f.size) for f in f1]
        des1 = np.array([f.des for f in f1])

        # Perform matching and sort based on distance
        # Note: the arguments to the brute-force matcher are (query
        # descriptors, train descriptors); here we use des1 as the query
        # descriptors because des1 holds the latest image frame's descriptors
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = matcher.match(des1, des0)
        matches = sorted(matches, key=lambda x: x.distance)

        # Prepare data for RANSAC outlier rejection
        self.src_pts = np.float32([kps0[m.trainIdx].pt for m in matches])
        self.dst_pts = np.float32([kps1[m.queryIdx].pt for m in matches])
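VerticalRANSAC is this project's own class. With stock OpenCV, a comparable outlier-rejection step could fit a fundamental matrix under RANSAC (a sketch, not the project's code):

F, mask = cv2.findFundamentalMat(self.src_pts, self.dst_pts,
                                 cv2.FM_RANSAC, 1.0, 0.99)
inliers = mask.ravel() == 1
src_in, dst_in = self.src_pts[inliers], self.dst_pts[inliers]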
Project: prototype    Author: chutsu    | Project Source | File Source
def as_cv_keypoint(self):
        return cv2.KeyPoint(self.pt[0],
                            self.pt[1],
                            self.size,
                            self.angle,
                            self.response,
                            self.octave,
                            self.class_id)
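The positional form used here works across OpenCV releases, unlike the underscore keywords noted in the first example. A small wrapper (illustrative, not from the project) can also normalize the argument types:

def make_cv_keypoint(x, y, size, angle=-1.0, response=0.0, octave=0, class_id=-1):
    # Positional arguments are accepted by both the pre-4.5.3 (_size, ...)
    # and the current (size, ...) bindings; only the keyword names changed.
    return cv2.KeyPoint(float(x), float(y), float(size), float(angle),
                        float(response), int(octave), int(class_id))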
Project: prototype    Author: chutsu    | Project Source | File Source
def update(self, frame_id, data):
        """Update feature track

        Parameters
        ----------
        frame_id : int
            Frame id
        data : KeyPoint or Feature
            data

        """
        self.frame_end = frame_id
        self.track.append(data)
Project: prototype    Author: chutsu    | Project Source | File Source
def last(self):
        """Return last data point

        Returns
        -------
        KeyPoint
            Last keypoint in the track

        """
        return self.track[-1]
Project: prototype    Author: chutsu    | Project Source | File Source
def draw_features(img, features):
    """Draw features"""
    # Convert to OpenCV KeyPoints
    cv_kps = []
    for f in features:
        cv_kps.append(cv2.KeyPoint(f.pt[0], f.pt[1], f.size))

    # Draw keypoints
    img = cv2.drawKeypoints(img, cv_kps, None, color=(0, 255, 0))
    return img
Project: prototype    Author: chutsu    | Project Source | File Source
def detect_keypoints(self, frame):
        """Detect

        Parameters
        ----------
        frame : np.array
            Image frame

        Returns
        -------
        results : list of KeyPoint
            List of KeyPoints

        """
        # Detect
        keypoints = self.detector.detect(frame)
        results = [KeyPoint(kp.pt, kp.size) for kp in keypoints]

        return results
Project: prototype    Author: chutsu    | Project Source | File Source
def detect_keypoints(self, frame):
        """Detect keypoints

        Parameters
        ----------
        frame : np.array
            Image frame

        Returns
        -------
        kps : list of KeyPoint
            Keypoints

        """
        # Detect keypoints
        keypoints = self.orb.detect(frame, None)

        # Convert OpenCV KeyPoint to KeyPoint
        kps = []
        for kp in keypoints:
            kps.append(KeyPoint(kp.pt,
                                kp.size,
                                kp.angle,
                                kp.response,
                                kp.octave))

        return kps
Project: prototype    Author: chutsu    | Project Source | File Source
def extract_descriptors(self, frame, kps):
        """Extract feature descriptors

        Parameters
        ----------
        frame : np.array
            Image frame
        kps : list of KeyPoint
            Key points

        Returns
        -------
        kps : list of KeyPoint
            Keypoints recomputed by the extractor
        des : np.array
            Descriptors

        """
        cv_kps = convert2cvkeypoints(kps)
        cv_kps, des = self.orb.compute(frame, cv_kps)

        # Convert OpenCV KeyPoint to KeyPoint
        kps = []
        for cv_kp in cv_kps:
            kps.append(KeyPoint(cv_kp.pt,
                                cv_kp.size,
                                cv_kp.angle,
                                cv_kp.response,
                                cv_kp.octave,
                                cv_kp.class_id))

        return kps, des
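convert2cvkeypoints() is defined elsewhere in the project; given the as_cv_keypoint() method shown earlier, a plausible implementation is simply:

def convert2cvkeypoints(kps):
    # Map this project's KeyPoint objects to their OpenCV counterparts.
    return [kp.as_cv_keypoint() for kp in kps]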
Project: finch    Author: chrisranderson    | Project Source | File Source
def harris_corners(image):
  i = 2   # blockSize: corner neighbourhood size
  j = 11  # ksize: Sobel aperture
  k = 4   # k/20 is the Harris free parameter
  copy = np.copy(image)
  response = cv2.cornerHarris(grayscale(copy), i, j, k / 20)
  # cornerHarris returns a response map the same shape as the image, not a
  # list of points; threshold it and convert the surviving (row, col)
  # indices to (x, y) keypoints.
  corners = np.argwhere(response > 0.01 * response.max())
  return [cv2.KeyPoint(float(c[1]), float(c[0]), 10) for c in corners]
Project: finch    Author: chrisranderson    | Project Source | File Source
def orb_features(image, keypoints):
  if isinstance(keypoints, np.ndarray):
    # keypoints holds (x, y) coordinates; size is the diameter of the described area
    keypoints = [cv2.KeyPoint(p[0], p[1], ORB_DESCRIPTOR_SIZE) for p in keypoints]

  orb = cv2.ORB_create()
  new_keypoints, descriptors = orb.compute(np.mean(image, axis=2).astype(np.uint8), keypoints)

  print('len(keypoints)', len(keypoints))
  print('len(new_keypoints)', len(new_keypoints))
  return new_keypoints, descriptors
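ORB_DESCRIPTOR_SIZE is a module constant not shown in this excerpt; assuming it matches ORB's default patch size of 31, a call could look like:

import numpy as np
import cv2

ORB_DESCRIPTOR_SIZE = 31  # assumption: ORB's default patchSize

image = cv2.imread("frame.png")                # any color image
pts = np.array([[40.0, 52.0], [120.0, 87.0]])  # one (x, y) pair per row
kps, des = orb_features(image, pts)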
Project: CS412_ComputerVision    Author: Tmbao    | Project Source | File Source
def detect(self, image):
    harrisResponse = cv2.cornerHarris(image, 
        self._block_size,
        self._aperture_size,
        self._alpha)
    points = np.argwhere(harrisResponse > harrisResponse.max() * self._quality_level)
    # np.argwhere yields (row, col) indices; cv2.KeyPoint expects (x, y).
    return [cv2.KeyPoint(float(point[1]), float(point[0]), self._feature_size)
            for point in points]
Project: Robo-Plot    Author: JackBuck    | Project Source | File Source
def _rotate_keypoint_to_bottom_right(self, keypoint: cv2.KeyPoint) -> None:
        current_degrees = self._estimate_current_anticlockwise_degrees(keypoint)
        self._img = _rotate_image_anticlockwise_without_cropping(-current_degrees, self._img)
        self.intermediate_images.append(NamedImage(self._img.copy(), 'Rotated Image'))
Project: bib-tagger    Author: KateRita    | Project Source | File Source
def findMatchesBetweenImages(image_1, image_2):
  """ Return the top 10 list of matches between two input images.

  This function detects and computes SIFT (or ORB) from the input images, and
  returns the best matches using the normalized Hamming Distance.

  Args:
    image_1 (numpy.ndarray): The first image (grayscale).
    image_2 (numpy.ndarray): The second image. (grayscale).

  Returns:
    image_1_kp (list): The image_1 keypoints, the elements are of type
                       cv2.KeyPoint.
    image_2_kp (list): The image_2 keypoints, the elements are of type
                       cv2.KeyPoint.
    matches (list): A list of matches, length 10. Each item in the list is of
                    type cv2.DMatch.

  """
  # matches - type: list of cv2.DMatch
  matches = None
  # image_1_kp - type: list of cv2.KeyPoint items.
  image_1_kp = None
  # image_1_desc - type: numpy.ndarray of numpy.uint8 values.
  image_1_desc = None
  # image_2_kp - type: list of cv2.KeyPoint items.
  image_2_kp = None
  # image_2_desc - type: numpy.ndarray of numpy.uint8 values.
  image_2_desc = None

  # WRITE YOUR CODE HERE.
  # init: SIFT is a factory defined elsewhere in this module; paired with the
  # Hamming-distance matcher below it presumably produces binary descriptors.
  sift = SIFT()

  #1. Compute SIFT keypoints and descriptors for both images
  image_1_kp, image_1_desc = sift.detectAndCompute(image_1,None)
  image_2_kp, image_2_desc = sift.detectAndCompute(image_2,None)

  #2. Create a Brute Force Matcher, using the hamming distance (and set crossCheck to true).
  #create BFMatcher object
  bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

  #3. Compute the matches between both images.
  #match descriptors
  matches = bf.match(image_1_desc,image_2_desc)

  #4. Sort the matches based on distance so you get the best matches.
  matches = sorted(matches, key=lambda x: x.distance)

  #5. Return the image_1 keypoints, image_2 keypoints, and the top 10 matches in a list.

  return image_1_kp, image_2_kp, matches[:10]
  # END OF FUNCTION.
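One caveat: cv2.NORM_HAMMING is only appropriate for binary descriptors such as ORB or BRIEF. Genuine SIFT descriptors are float vectors and should be matched with the L2 norm instead:

bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)  # for float (SIFT) descriptors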
Project: prototype    Author: chutsu    | Project Source | File Source
def track_features(self, image_ref, image_cur):
        """Track Features

        Parameters
        ----------
        image_ref : np.array
            Reference image
        image_cur : np.array
            Current image

        """
        # Re-detect new feature points if too few
        if len(self.tracks_tracking) < self.min_nb_features:
            self.tracks_tracking = []  # reset alive feature tracks
            self.detect(image_ref)

        # LK parameters
        win_size = (21, 21)
        max_level = 2
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.03)

        # Convert reference keypoints to numpy array
        self.kp_ref = self.last_keypoints()

        # Perform LK tracking
        lk_params = {"winSize": win_size,
                     "maxLevel": max_level,
                     "criteria": criteria}
        self.kp_cur, statuses, err = cv2.calcOpticalFlowPyrLK(image_ref,
                                                              image_cur,
                                                              self.kp_ref,
                                                              None,
                                                              **lk_params)

        # Filter out bad matches (choose only good keypoints)
        status = statuses.reshape(statuses.shape[0])
        still_alive = []
        for i in range(len(status)):
            if status[i] == 1:
                track_id = self.tracks_tracking[i]
                still_alive.append(track_id)
                kp = KeyPoint(self.kp_cur[i], 0)
                self.tracks[track_id].update(self.frame_id, kp)

        self.tracks_tracking = still_alive
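cv2.calcOpticalFlowPyrLK expects prevPts as an N x 1 x 2 float32 array, so last_keypoints() (not shown in this excerpt) presumably does something like:

kp_ref = np.float32([kp.pt for kp in keypoints]).reshape(-1, 1, 2)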
Project: imgProcessor    Author: radjkarl    | Project Source | File Source
def draw_matches(img1, kp1, img2, kp2, matches, color=None, thickness=2, r=15):
    """Draws lines between matching keypoints of two images.  
    Keypoints not in a matching pair are not drawn.
    Places the images side by side in a new image and draws circles 
    around each keypoint, with line segments connecting matching pairs.
    You can tweak the r and thickness values as needed.
    Args:
        img1: An openCV image ndarray in a grayscale or color format.
        kp1: A list of cv2.KeyPoint objects for img1.
        img2: An openCV image ndarray of the same format and with the same 
        element type as img1.
        kp2: A list of cv2.KeyPoint objects for img2.
        matches: A list of DMatch objects whose trainIdx attribute refers to 
        img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
        color: The color of the circles and connecting lines drawn on the images.  
        A 3-tuple for color images, a scalar for grayscale images.  If None, these
        values are randomly generated.  
    """
    # We're drawing them side by side.  Get dimensions accordingly.
    # Handle both color and grayscale images.
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[
                     1] + img2.shape[1], img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (
            max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1])
    new_img = np.zeros(new_shape, type(img1.flat[0]))
    # Place images onto the new image.
    new_img[0:img1.shape[0], 0:img1.shape[1]] = img1
    new_img[0:img2.shape[0], img1.shape[1]:img1.shape[1] + img2.shape[1]] = img2

    # Draw lines between matches.  Make sure to offset kp coords in second
    # image appropriately.
    if color:
        c = color
    for m in matches:
        # Generate a random color for RGB/BGR and grayscale images as needed.
        # cv2 drawing calls want plain ints/tuples, not numpy arrays.
        if not color:
            if len(img1.shape) == 3:
                c = tuple(int(v) for v in np.random.randint(0, 256, 3))
            else:
                c = int(np.random.randint(0, 256))
        # So the keypoint locs are stored as a tuple of floats.  cv2.line(), like most other things,
        # wants locs as a tuple of ints.
        end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
        end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(
            int) + np.array([img1.shape[1], 0]))
        cv2.line(new_img, end1, end2, c, thickness)
        cv2.circle(new_img, end1, r, c, thickness)
        cv2.circle(new_img, end2, r, c, thickness)
    return new_img
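A typical call, reusing names from the matcher examples above (illustrative only; note that this version expects trainIdx to index into kp1):

vis = draw_matches(img_a, kps_a, img_b, kps_b, good_matches[:10])
cv2.imwrite("matches.png", vis)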
Project: ocular    Author: wolfd    | Project Source | File Source
def draw_matches(img1, kp1, img2, kp2, matches, color=None): 
    """Draws lines between matching keypoints of two images.  
    Keypoints not in a matching pair are not drawn.
    Places the images side by side in a new image and draws circles 
    around each keypoint, with line segments connecting matching pairs.
    You can tweak the r, thickness, and figsize values as needed.
    Args:
        img1: An openCV image ndarray in a grayscale or color format.
        kp1: A list of cv2.KeyPoint objects for img1.
        img2: An openCV image ndarray of the same format and with the same 
        element type as img1.
        kp2: A list of cv2.KeyPoint objects for img2.
        matches: A list of DMatch objects whose trainIdx attribute refers to 
        img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
        color: The color of the circles and connecting lines drawn on the images.  
        A 3-tuple for color images, a scalar for grayscale images.  If None, these
        values are randomly generated.  
    """
    # We're drawing them side by side.  Get dimensions accordingly.
    # Handle both color and grayscale images.
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1])
    new_img = np.zeros(new_shape, type(img1.flat[0]))  
    # Place images onto the new image.
    new_img[0:img1.shape[0],0:img1.shape[1]] = img1
    new_img[0:img2.shape[0],img1.shape[1]:img1.shape[1]+img2.shape[1]] = img2

    # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
    r = 15
    thickness = 2
    if color:
        c = color
    for m in matches:
        # Generate a random color for RGB/BGR and grayscale images as needed.
        # cv2 drawing calls want plain ints/tuples, not numpy arrays.
        if not color:
            if len(img1.shape) == 3:
                c = tuple(int(v) for v in np.random.randint(0, 256, 3))
            else:
                c = int(np.random.randint(0, 256))
        # So the keypoint locs are stored as a tuple of floats.  cv2.line(), like most other things,
        # wants locs as a tuple of ints.
        end1 = tuple(np.round(kp1[m.queryIdx].pt).astype(int))
        end2 = tuple(np.round(kp2[m.trainIdx].pt).astype(int) + np.array([img1.shape[1], 0]))
        cv2.line(new_img, end1, end2, c, thickness)
        cv2.circle(new_img, end1, r, c, thickness)
        cv2.circle(new_img, end2, r, c, thickness)

    plt.figure(figsize=(15,15))
    plt.imshow(new_img)
    plt.show()
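For reference, OpenCV also ships an equivalent built-in, which covers the common case without hand-rolled drawing (here queryIdx indexes kp1 and trainIdx indexes kp2):

vis = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None,
                      flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
plt.imshow(vis)
plt.show()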