Python cv2 module: drawMatchesKnn() example source code

We extracted the following four code examples from open-source Python projects to illustrate how to use cv2.drawMatchesKnn().
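Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical knnMatch + drawMatchesKnn workflow. It uses ORB rather than SIFT purely so that it runs on stock OpenCV builds (in the OpenCV 3.x era these projects target, SIFT lives in the contrib xfeatures2d module); the image paths are placeholders.

import cv2

# Hypothetical input paths; substitute your own images.
img1 = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)

# Detect keypoints and compute binary descriptors.
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# Hamming norm matches ORB's binary descriptors; k=2 returns the two
# nearest neighbors per query descriptor, as Lowe's ratio test requires.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Keep a match only when it is clearly better than the runner-up.
good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]

# drawMatchesKnn expects a list of lists of DMatch objects.
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imwrite('matches.png', img3)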

Project: Recognition    Author: thautwarm    | Project source code | File source code
def SIFTMATCH(img1, img2):
    # Assumes module-level context defined elsewhere in the project: cv2,
    # matplotlib.pyplot as plt, and Sift = cv2.xfeatures2d.SIFT_create().
    img1 = img1.copy()
    img2 = img2.copy()
    # Find the keypoints and descriptors with SIFT.
    kp1, des1 = Sift.detectAndCompute(img1, None)
    kp2, des2 = Sift.detectAndCompute(img2, None)
    # BFMatcher with default params.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Band-pass variant of the ratio test: keep a match only when its
    # distance lies between 0.55x and 0.80x of the second-nearest neighbor's.
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if 0.55 * n.distance < m.distance < 0.80 * n.distance:
            matchesMask[i] = [1, 0]
    # cv2.drawMatchesKnn expects a list of lists as matches; matchesMask
    # selects which of the k candidates are drawn for each query keypoint.
    draw_params = dict(matchesMask=matchesMask)
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, flags=2, **draw_params)
    plt.imshow(img3, cmap='gray')
    plt.show()
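Note the design choice above: instead of the usual single-sided Lowe ratio test, a match is kept only when its distance falls in a band between 0.55 and 0.80 of the second-nearest neighbor's. The full knnMatch output is passed to drawMatchesKnn(), so the matchesMask entries do the actual filtering of the visualization: [1, 0] draws only the best candidate of a pair, [0, 0] draws neither.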
Project: Yugioh-bot    Author: will7200    | Project source code | File source code
def get_matches(self, train, corr):
        # self.query, self.yThreshold, self.xThreshold, self._debug and
        # self.images are initialized elsewhere in the class.
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)

        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        # Assumes sklearn.cluster.KMeans is imported at module level.
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster = self.compare_distances(train_img, cluster)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, good_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True
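In this example drawMatchesKnn() is only a debugging aid; the actual decision logic gates matches by position thresholds, clusters the surviving coordinates with a single-cluster KMeans, and returns True only when compare_distances() keeps at least half of them.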
Project: pytomatic    Author: N0K0    | Project source code | File source code
def find_features_in_array_SIFT(self, sub_image, main_image, debug=False):
        # Initiate SIFT detector (assumes SIFT_create was imported at module
        # level, e.g. from cv2.xfeatures2d import SIFT_create).
        sift = SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(sub_image, None)
        kp2, des2 = sift.detectAndCompute(main_image, None)

        # BFMatcher with default params
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        logging.debug("Found {} possible matches".format(len(matches)))

        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append([m])

        good.sort(key=lambda x: x[0].distance)

        if debug:
            # cv2.drawMatchesKnn expects list of lists as matches.
            img3 = cv2.drawMatchesKnn(sub_image, kp1, main_image, kp2, good, flags=2, outImg=None,
                                      matchColor=(255, 255, 0))
            plt.imshow(img3), plt.show()

        ret_list = []
        for match in good:
            index = match[0].trainIdx
            point = kp2[index].pt
            ret_list.append((int(point[0]), int(point[1])))

        logging.debug("After filtering {}".format(len(good)))
        return ret_list
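This variant passes outImg=None and matchColor=(255, 255, 0) as keyword arguments, letting OpenCV allocate the output image and drawing every match line in cyan (BGR order). Because good was sorted by ascending distance, the coordinates in ret_list are ordered from strongest to weakest match.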
Project: Analog-Utility-Meter-Reader    Author: arjun372    | Project source code | File source code
def main():
    # Python 2 code: CAM_URL, sift, img_ref, kp_ref and des_ref are defined
    # at module level elsewhere in the project.
    stream = urllib.urlopen(CAM_URL)
    bytes = ''
    ts = time.time()
    while True:
        bytes += stream.read(2048)
        a = bytes.find('\xff\xd8')  # JPEG start-of-image marker
        b = bytes.find('\xff\xd9')  # JPEG end-of-image marker
        if a == -1 or b == -1:
            continue

        # Frame available
        rtimestamp = time.time()
        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('RAW', img)

        # SIFT to get corresponding points
        kp, des = sift.detectAndCompute(img, None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des_ref, des, k=2)
        m = []
        for ma, na in matches:
            if ma.distance < 0.75 * na.distance:
                m.append([ma])
        # Draw only the first four filtered matches.
        img3 = cv2.drawMatchesKnn(img_ref, kp_ref, img, kp, m[:4], None, flags=2)
        cv2.imshow('MatchesKnn', img3)

        #pts_ref = np.float32([[kp_ref[m[0].queryIdx].pt[0],kp_ref[m[0].queryIdx].pt[1]],[kp_ref[m[1].queryIdx].pt[0],kp_ref[m[1].queryIdx].pt[1]],[kp_ref[m[2].queryIdx].pt[0],kp_ref[m[2].queryIdx].pt[1]],[kp_ref[m[3].queryIdx].pt[0],kp_ref[m[3].queryIdx].pt[1]]])
        #pts     = np.float32([[kp[m[0].trainIdx].pt[0],kp[m[0].trainIdx].pt[1]],[kp[m[1].trainIdx].pt[0],kp[m[1].trainIdx].pt[1]],[kp[m[2].trainIdx].pt[0],kp[m[2].trainIdx].pt[1]],[kp[m[3].trainIdx].pt[0],kp[m[3].trainIdx].pt[1]]])
        # Perspective Transform
        #M = cv2.getPerspectiveTransform(pts_ref,pts)
        #dst = cv2.warpPerspective(img,M,(cols,rows))
        #cv2.imshow('Perspective Transform',dst)

        # Print the processing lag for this frame
        print(time.time() - ts)
        ts = time.time()

        if cv2.waitKey(1) == 27:
            exit(0)
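The commented-out perspective-transform step would feed exactly the first four matches to cv2.getPerspectiveTransform(), which fails badly if any one of them is an outlier. A more robust variant, shown here as a sketch rather than as part of the original project, estimates the homography with RANSAC over all ratio-test survivors (it assumes m holds at least four matches):

        # Matched coordinates: trainIdx indexes the live frame, queryIdx the reference.
        src = np.float32([x[0].pt for x in []])  # placeholder removed below
        src = np.float32([kp[x[0].trainIdx].pt for x in m]).reshape(-1, 1, 2)
        dst = np.float32([kp_ref[x[0].queryIdx].pt for x in m]).reshape(-1, 1, 2)
        # RANSAC with a 5-pixel reprojection tolerance rejects outlier matches.
        M, inliers = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
        h, w = img_ref.shape[:2]
        # Warp the live frame into the reference image's coordinate system.
        dst_img = cv2.warpPerspective(img, M, (w, h))
        cv2.imshow('Perspective Transform', dst_img)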