Python cv2 module, findHomography() example source code

We extracted the following 27 code examples from open-source Python projects to illustrate how to use cv2.findHomography().

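For reference before the project listings: cv2.findHomography takes two matched point sets of shape (N, 1, 2) (or (N, 2)) with N >= 4, and returns a 3x3 perspective matrix plus a per-point inlier mask. A minimal self-contained sketch with synthetic points (values hypothetical, not taken from any project below):

import numpy as np
import cv2

# A known ground-truth homography (translation plus mild perspective)
H_true = np.array([[1.0, 0.0, 10.0],
                   [0.0, 1.0, 20.0],
                   [1e-4, 0.0, 1.0]])

src = (np.random.rand(30, 1, 2) * 100).astype(np.float32)
dst = cv2.perspectiveTransform(src, H_true)
dst[:3] += 50.0  # corrupt three correspondences to simulate outliers

# RANSAC with a 5-pixel reprojection threshold
H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
print(np.round(H / H[2, 2], 3))  # close to H_true
print(mask.ravel())              # 1 = RANSAC inlier, 0 = rejected outlier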
Project: Roomba980-Python    Author: NickWaterton    | project source | file source
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            # fall back to the OpenCV 2.x API
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not  enough  matches are found   -   %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image
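The scale/rotation recovery above treats M as a similarity transform and reads s*cos(theta) and s*sin(theta) out of its first row. A quick sanity check with a synthetic matrix (values hypothetical; the recovered angle's sign depends on the rotation convention used):

import math
import numpy as np

theta, s = math.radians(30.0), 1.5
M = np.array([[s * math.cos(theta), s * math.sin(theta), 10.0],
              [-s * math.sin(theta), s * math.cos(theta), 20.0],
              [0.0, 0.0, 1.0]])
ss, sc = M[0, 1], M[0, 0]
print(math.sqrt(ss * ss + sc * sc))      # 1.5  -> recovered scale
print(math.degrees(math.atan2(ss, sc)))  # 30.0 -> recovered rotation in degrees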
Project: LogoDetectionInVideo    Author: nmemme    | project source | file source
def bf_knnmatches(matches, img, kp1, kp2):
    MIN_MATCH_COUNT = 10
    # store all the good matches as per Lowe's ratio test.
    good = []
    dst = []
    if len(matches[0]) == 2:
        for m, n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            if M is not None:
                h, w = img.shape
                pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, M)
            else:
                dst = []
    else:
        dst = []
    return dst
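bf_knnmatches expects the raw k=2 output of a brute-force matcher; a hedged usage sketch (the logo/frame variable names are hypothetical):

bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # NORM_HAMMING suits binary (e.g. ORB) descriptors
matches = bf.knnMatch(des_logo, des_frame, k=2)
corners = bf_knnmatches(matches, logo_gray, kp_logo, kp_frame)
if len(corners):
    cv2.polylines(frame, [np.int32(corners)], True, (0, 255, 0), 3)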
Project: inyourface    Author: yacomink    | project source | file source
def maskFace(self, frame_image, face):

        img1 = cv2.imread(self.__class__.mask_path, cv2.IMREAD_UNCHANGED)
        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask = self.getTransPIL(cv2.warpPerspective(img1, h, (frame_image.width,frame_image.height)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        enhancer = ImageEnhance.Color(frame_image)
        enhanced = enhancer.enhance(0.1)
        enhancer = ImageEnhance.Brightness(enhanced)
        enhanced = enhancer.enhance(1.2)
        enhancer = ImageEnhance.Contrast(enhanced)
        enhanced = enhancer.enhance(1.2)

        frame_image.paste(enhanced, (0,0), mask)
        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: quadrilaterals-rectifier    Author: michal2229    | project source | file source
def extract_rect(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

    ret,thresh = cv2.threshold(imgray, 127, 255, 0)

    # NOTE: OpenCV 2.x/4.x signature; OpenCV 3.x returns (image, contours, hierarchy)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)

    #cv2.drawContours(im, appr, -1, (0,255,0), 3)
    points_list = [[i[0][0], i[0][1]] for i in appr] 

    left  = sorted(points_list, key = lambda p: p[0])[0:2]
    right = sorted(points_list, key = lambda p: p[0])[2:4]

    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key = lambda p: p[1])[0]
    ld = sorted(left, key = lambda p: p[1])[1]

    ru = sorted(right, key = lambda p: p[1])[0]
    rd = sorted(right, key = lambda p: p[1])[1]

    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    # integer midpoints (also used as slice bounds when cropping below)
    lu_ = [ (lu[0] + ld[0])//2, (lu[1] + ru[1])//2 ]
    ld_ = [ (lu[0] + ld[0])//2, (ld[1] + rd[1])//2 ]
    ru_ = [ (ru[0] + rd[0])//2, (lu[1] + ru[1])//2 ]
    rd_ = [ (ru[0] + rd[0])//2, (ld[1] + rd[1])//2 ]

    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h,w,b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    print("H" + str(H))

    imw =  cv2.warpPerspective(im, H, (w, h))

    return imw[lu_[1]:rd_[1], lu_[0]:rd_[0]] # cropping image
Project: mrflow    Author: jswulff    | project source | file source
def find_homography_normalized(p1,p2,robust=True):
    """Get best estimate of homography.

    Parameters:
    robust : bool, optional
        If set to True (default), use LMedS estimation. If set to False,
        use least squares.

    """
    p1,p2,N1,N2 = normalize_points(p1,p2)
    if robust:
        method=cv2.LMEDS
    else:
        method=0
    H_, inliers = cv2.findHomography(p1.astype('float32'),
            p2.astype('float32'),
            method=method)
    H = np.linalg.inv(N2).dot(H_.astype('float')).dot(N1)
    H /= H[2,2]
    return H
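normalize_points is not part of this excerpt; a plausible sketch of the Hartley-style conditioning it presumably performs (zero-mean points, average distance sqrt(2)), returning the normalized points together with the 3x3 matrices N1, N2 that find_homography_normalized undoes:

def normalize_points(p1, p2):
    def _condition(p):
        mu = p.mean(axis=0)
        scale = np.sqrt(2) / np.sqrt(((p - mu) ** 2).sum(axis=1)).mean()
        # 3x3 matrix applying the same shift/scale to homogeneous points
        N = np.array([[scale, 0.0, -scale * mu[0]],
                      [0.0, scale, -scale * mu[1]],
                      [0.0, 0.0, 1.0]])
        return (p - mu) * scale, N
    p1n, N1 = _condition(p1)
    p2n, N2 = _condition(p2)
    return p1n, p2n, N1, N2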

#
# Two simple wrappers to use the same interface
#
Project: mrflow    Author: jswulff    | project source | file source
def find_homography_unnormalized(p1,p2,robust=True):
    """Get best estimate of homography without normalization.

    Parameters:
    robust : bool, optional
        If set to True (default), use LMedS estimation. If set to False,
        use least squares.

    """
    if robust:
        method=cv2.LMEDS
    else:
        method=0
    H, inliers = cv2.findHomography(p1.astype('float32'),
            p2.astype('float32'),
            method=method)
    return H
Project: pyhiro    Author: wanweiwei07    | project source | file source
def checkAvailability(sift, tkp, tdes, matchimg):
    """

    :param sift:
    :param tkp:
    :param tdes:sift feature object, template keypoints, and template descriptor
    :param matchimg:
    :return:
    """

    qimg = cv2.imread(matchimg)
    qimggray = cv2.cvtColor(qimg,cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5,5), np.uint8)
    # qimggray = cv2.erode(qimggray, kernel, iterations=1)
    # ret,threshimg = cv2.threshold(qimggray,100,255,cv2.THRESH_BINARY)
    qkp,qdes = sift.detectAndCompute(qimggray, None)
    # plt.imshow(threshimg, 'gray'), plt.show()

    FLANN_INDEX_KDTREE=0
    index_params=dict(algorithm=FLANN_INDEX_KDTREE,trees=5)
    # FLANN_INDEX_LSH = 6
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=12,  # 12
    #                     key_size=20,  # 20
    #                     multi_probe_level=2)  # 2
    search_params = dict(checks = 50)
    flann=cv2.FlannBasedMatcher(index_params,search_params)
    matches=flann.knnMatch(tdes,qdes,k=2)
    goodMatch=[]
    for m_n in matches:
        if len(m_n) != 2:
            continue
        m, n = m_n
        if(m.distance<0.75*n.distance):
            goodMatch.append(m)
    MIN_MATCH_COUNT = 30
    if (len(goodMatch) >= MIN_MATCH_COUNT):
        tp = []
        qp = []

        for m in goodMatch:
            tp.append(tkp[m.queryIdx].pt)
            qp.append(qkp[m.trainIdx].pt)

        tp, qp = np.float32((tp, qp))
        H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)

        # NOTE: timg (the template image) is assumed to be available in the
        # enclosing scope; only its height and width are used here
        h = timg.shape[0]
        w = timg.shape[1]
        trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
        queryBorder = cv2.perspectiveTransform(trainBorder, H)
        cv2.polylines(qimg, [np.int32(queryBorder)], True, (0, 255, 0), 5)
        cv2.imshow('result', qimg)
        plt.imshow(qimg, 'gray'), plt.show()
        return True
    else:
        print "Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT)
        return False
    # cv2.imshow('result', qimg)
    # if cv2.waitKey(10) == ord('q'):
    #     cv2.destroyAllWindows()
Project: videoseg    Author: pathak22    | project source | file source
def frame_homography(totalPts, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        totalPts: (numAllPoints, 4): x0, y0, x1, y1
        fgPts: (numAllPoints, 4): x0, y0, x1, y1
    """
    if totalPts.ndim != 2 or totalPts.shape[0] < 8 or homTh < 0:
        return totalPts

    import cv2
    p1 = totalPts[:, :2].astype('float')
    p2 = totalPts[:, 2:4].astype('float')
    _, status = cv2.findHomography(
        p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
    fgPts = totalPts[status[:, 0] == 0, :]
    return fgPts
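Note the mask semantics: findHomography returns a status array with 1 for RANSAC inliers, so status[:, 0] == 0 selects exactly the points that do not follow the dominant (background) motion. A tiny hedged check with synthetic tracks (exact counts vary with RANSAC's random sampling):

import numpy as np

pts = np.random.rand(60, 2) * 100
tracks = np.hstack([pts, pts])   # x0, y0, x1, y1: identity motion
tracks[:5, 2:] += 40             # five points moving differently
fg = frame_homography(tracks, homTh=3.0)
print(fg.shape)                  # roughly (5, 4): the "foreground" rows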
Project: 3Dreconstruction    Author: alyssaq    | project source | file source
def find_correspondence_points(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(
        cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY), None)
    kp2, des2 = sift.detectAndCompute(
        cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY), None)

    # Find point matches
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Apply Lowe's SIFT matching ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)

    src_pts = np.asarray([kp1[m.queryIdx].pt for m in good])
    dst_pts = np.asarray([kp2[m.trainIdx].pt for m in good])

    # Constrain matches to fit homography
    retval, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 100.0)
    mask = mask.ravel()

    # We select only inlier points
    pts1 = src_pts[mask == 1]
    pts2 = dst_pts[mask == 1]

    return pts1.T, pts2.T
Project: LensCalibrator    Author: 1024jp    | project source | file source
def _estimate_homography(image_points, ideal_points):
        """Find homography matrix.
        """
        fp = np.array(image_points)
        tp = np.array(ideal_points)
        H, _ = cv2.findHomography(fp, tp, 0)
        return H
Project: 360-stabilizer    Author: MateusZitelli    | project source | file source
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB):
    # compute the raw matches and initialize the list of actual
    # matches
    matcher = cv2.DescriptorMatcher_create("FlannBased")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
      # ensure the distance is within a certain ratio of each
      # other (i.e. Lowe's ratio test); this check assumes a self.ratio
      # attribute (e.g. 0.75) not shown in this excerpt
      if len(m) == 2 and m[0].distance < m[1].distance * self.ratio:
        matches.append((m[0].trainIdx, m[0].queryIdx))

    # computing a homography requires at least 4 matches
    if len(matches) > 4:
      ptsA = np.float32([kpsA[i] for (_, i) in matches])
      ptsB = np.float32([kpsB[i] for (i, _) in matches])

      # compute the homography between the two sets of points
      (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
        self.reprojThresh)

      # return the matches along with the homography matrix
      # and status of each matched point
      return (matches, H, status)

    # otherwise, no homography could be computed
    return None
Project: inyourface    Author: yacomink    | project source | file source
def maskMouth(self, frame_image, face):
        elements = cv2.imread(self.__class__.mask_mouth_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_mouth_points, np.array(self.getMouthPoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: inyourface    Author: yacomink    | project source | file source
def maskFace(self, frame_image, face):

        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: mrflow    Author: jswulff    | project source | file source
def old_find_homography_normalized(p1,p2):
    """
    A small wrapper around cv2.getPerspectiveTransform, with normalization of
    point locations.

    """

    return cv2.findHomography(p1,p2,method=cv2.LMEDS)[0]

    mu1 = p1.mean(axis=0)
    std1 = p1.std(axis=0)
    mu2 = p2.mean(axis=0)
    std2 = p2.std(axis=0)

    p1_ = (p1 - mu1) / std1
    p2_ = (p2 - mu2) / std2

    H_ = cv2.findHomography(p1_,p2_,method=cv2.LMEDS)[0]
    A1 = np.array([[1.0/std1[0], 0.0, -mu1[0]/std1[0]],
                   [0, 1.0/std1[1], -mu1[1]/std1[1]],
                   [0,0,1.0]])
    A2inv = np.array([[std2[0], 0.0, mu2[0]],
                   [0, std2[1], mu2[1]],
                   [0,0,1.0]])
    H = A2inv.dot(H_).dot(A1)
    return H
Project: videoseg    Author: pathak22    | project source | file source
def shot_homography(shotTracks, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        shotTracks: (numFrames, numAllPoints, 2)
        fgTracks: (numFrames, numForegroundPoints, 2)
    """
    if shotTracks.ndim < 3 or shotTracks.shape[0] < 2 or homTh < 0:
        return shotTracks

    import cv2
    status = 1
    for i in range(1, shotTracks.shape[0]):
        if shotTracks[i - 1, 0, 2] > -1000:
            p1 = shotTracks[i - 1, :, 2:].astype('float')
        else:
            p1 = shotTracks[i - 1, :, :2].astype('float')
        p2 = shotTracks[i, :, :2].astype('float')
        _, new_status = cv2.findHomography(
            p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
        status = new_status * status

    fgTracks = shotTracks[:, status[:, 0] == 0, :]
    print(shotTracks.shape[0], shotTracks.shape[1], fgTracks.shape[1])
    return fgTracks
Project: BoxCars    Author: JakubSochor    | project source | file source
def _unpack_side(img, origPoints, targetSize):
    origPoints = np.array(origPoints).reshape(-1,1,2)
    targetPoints = np.array([(0,0), (targetSize[0],0), (0, targetSize[1]), 
                             (targetSize[0], targetSize[1])]).reshape(-1,1,2).astype(origPoints.dtype)
    m, _ = cv2.findHomography(origPoints, targetPoints, 0)
    resultImage = cv2.warpPerspective(img, m, targetSize)
    return resultImage


#%%
Project: specularity-removal    Author: gmichaeljaison    | project source | file source
def homography(img1, img2, visualize=False):
    """
    Finds Homography matrix from Image1 to Image2.
        Two images should be a plane and can change in viewpoint

    :param img1: Source image
    :param img2: Target image
    :param visualize: Flag to visualize the matched pixels and Homography warping
    :return: Homography matrix. (or) Homography matrix, Visualization image - if visualize is True
    """
    sift = cv.xfeatures2d.SIFT_create()
    kp1, desc1 = sift.detectAndCompute(img1, None)
    kp2, desc2 = sift.detectAndCompute(img2, None)

    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=INDEX_PARAM_TREES)
    # number of times the trees in the index should be recursively traversed;
    # higher values give better precision but take more time
    sch_params = dict(checks=SCH_PARAM_CHECKS)
    flann = cv.FlannBasedMatcher(index_params, sch_params)

    matches = flann.knnMatch(desc1, desc2, k=2)
    logging.debug('{} matches found'.format(len(matches)))

    # select good matches
    matches_arr = []
    good_matches = []
    for m, n in matches:
        if m.distance < GOOD_MATCH_THRESHOLD * n.distance:
            good_matches.append(m)
        matches_arr.append(m)

    if len(good_matches) < MIN_MATCH_COUNT:
        raise Exception('Not enough matches found')
    else:
        logging.debug('{} of {} are good matches'.format(len(good_matches), len(matches)))

    src_pts = [kp1[m.queryIdx].pt for m in good_matches]
    src_pts = np.array(src_pts, dtype=np.float32).reshape((-1, 1, 2))
    dst_pts = [kp2[m.trainIdx].pt for m in good_matches]
    dst_pts = np.array(dst_pts, dtype=np.float32).reshape((-1, 1, 2))

    homo, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5)

    if visualize:
        res = visualize_homo(img1, img2, kp1, kp2, matches, homo, mask)
        return homo, res

    return homo
Project: AR-BXT-AR4Python    Author: GeekLiB    | project source | file source
def getMatches(self, sceneImage):
        """
        sceneImage: ?????array??

        return dst: ????????

        """
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(self.MarkImage[:,:,0],None)
        kp2, des2 = sift.detectAndCompute(sceneImage[:,:,0],None)

        # create FLANN matcher
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # Match descriptors.
        matches = flann.knnMatch(des1,des2,k=2)

        # Keep only the good matches using Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) < self.MIN_MATCH_COUNT:
            return None

        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
        matchesMask = mask.ravel().tolist()

        h,w = self.MarkImage.shape[:2]
        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,M)

        draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                           singlePointColor = None,
                           matchesMask = matchesMask, # draw only inliers
                           flags = 2)

        self.SceneImage = sceneImage
        self.DrawParams = draw_params
        self.KP1 = kp1
        self.KP2 = kp2
        self.GoodMatches = good
        return dst
Project: AR-BXT-AR4Python    Author: GeekLiB    | project source | file source
def getMatches(self, sceneImage):
        """
        sceneImage: ?????array??

        return dst: ????????

        """
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(self.MarkImage[:,:,0],None)
        kp2, des2 = sift.detectAndCompute(sceneImage[:,:,0],None)

        # create FLANN matcher
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # Match descriptors.
        matches = flann.knnMatch(des1,des2,k=2)

        # Keep only the good matches using Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) < self.MIN_MATCH_COUNT:
            return None

        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
        matchesMask = mask.ravel().tolist()

        h,w = self.MarkImage.shape[:2]
        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,M)

        draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                           singlePointColor = None,
                           matchesMask = matchesMask, # draw only inliers
                           flags = 2)

        self.SceneImage = sceneImage
        self.DrawParams = draw_params
        self.KP1 = kp1
        self.KP2 = kp2
        self.GoodMatches = good
        return dst
Project: tensorflow_homographynet    Author: linjian93    | project source | file source
def homographyGeneration(raw_image, image_path, dataPath, pairs_per_img):
    img = cv2.resize(cv2.imread(image_path, 0),(320,240))
    with open(dataPath + 'homography_re' + '.txt', 'ab') as output_file_1, open(dataPath + 'homography_ab' + '.txt', 'ab') as output_file_2:

        random_list = []
        i = 1
        while i < pairs_per_img + 1:
            y_start = random.randint(32,80)
            y_end = y_start + 128
            x_start = random.randint(32,160)
            x_end = x_start + 128

            y_1 = y_start
            x_1 = x_start
            y_2 = y_end
            x_2 = x_start
            y_3 = y_end
            x_3 = x_end
            y_4 = y_start
            x_4 = x_end

            img_patch = img[y_start:y_end, x_start:x_end]  # patch 1

            y_1_offset = random.randint(-32,32)
            x_1_offset = random.randint(-32,32)
            y_2_offset = random.randint(-32,32)
            x_2_offset = random.randint(-32,32)
            y_3_offset = random.randint(-32,32)
            x_3_offset = random.randint(-32,32)
            y_4_offset = random.randint(-32,32)
            x_4_offset = random.randint(-32,32)

            y_1_p = y_1 + y_1_offset
            x_1_p = x_1 + x_1_offset
            y_2_p = y_2 + y_2_offset
            x_2_p = x_2 + x_2_offset
            y_3_p = y_3 + y_3_offset
            x_3_p = x_3 + x_3_offset
            y_4_p = y_4 + y_4_offset
            x_4_p = x_4 + x_4_offset

            pts_img_patch = np.array([[y_1,x_1],[y_2,x_2],[y_3,x_3],[y_4,x_4]]).astype(np.float32)
            pts_img_patch_perturb = np.array([[y_1_p,x_1_p],[y_2_p,x_2_p],[y_3_p,x_3_p],[y_4_p,x_4_p]]).astype(np.float32)
            h,status = cv2.findHomography(pts_img_patch, pts_img_patch_perturb, cv2.RANSAC)

            img_perburb = cv2.warpPerspective(img, h, (320, 240))
            img_perburb_patch = img_perburb[y_start:y_end, x_start:x_end]  # patch 2
            if [y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4] not in random_list:
                random_list.append([y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4])
                h_4pt_1 = np.array([y_1_offset,x_1_offset,y_2_offset,x_2_offset,y_3_offset,x_3_offset,y_4_offset,x_4_offset])
                h_4pt_2 = np.array([y_1_p,x_1_p,y_2_p,x_2_p,y_3_p,x_3_p,y_4_p,x_4_p])

                img_patch_path = os.path.join(dataPath, (raw_image.split('.')[0] + '_' + str(i) + '_1' +'.jpg'))
                cv2.imwrite(img_patch_path, img_patch)
                img_perburb_patch_path = os.path.join(dataPath, (raw_image.split('.')[0] + '_' + str(i) + '_2' +'.jpg'))
                cv2.imwrite(img_perburb_patch_path, img_perburb_patch)
                np.savetxt(output_file_1, h_4pt_1)
                np.savetxt(output_file_2, h_4pt_2)
                i += 1
Project: tensorflow_homographynet    Author: linjian93    | project source | file source
def generate_data_test(img_path):
    data = []
    img = cv2.resize(cv2.imread(img_path, 0), (640, 480))

    y_start = random.randint(64, 160)
    y_end = y_start + 256
    x_start = random.randint(64, 320)
    x_end = x_start + 256

    y_1 = y_start
    x_1 = x_start
    y_2 = y_end
    x_2 = x_start
    y_3 = y_end
    x_3 = x_end
    y_4 = y_start
    x_4 = x_end

    img_patch = img[y_start:y_end, x_start:x_end]  # patch 1

    y_1_offset = random.randint(-64, 64)
    x_1_offset = random.randint(-64, 64)
    y_2_offset = random.randint(-64, 64)
    x_2_offset = random.randint(-64, 64)
    y_3_offset = random.randint(-64, 64)
    x_3_offset = random.randint(-64, 64)
    y_4_offset = random.randint(-64, 64)
    x_4_offset = random.randint(-64, 64)

    y_1_p = y_1 + y_1_offset
    x_1_p = x_1 + x_1_offset
    y_2_p = y_2 + y_2_offset
    x_2_p = x_2 + x_2_offset
    y_3_p = y_3 + y_3_offset
    x_3_p = x_3 + x_3_offset
    y_4_p = y_4 + y_4_offset
    x_4_p = x_4 + x_4_offset

    pts_img_patch = np.array([[y_1,x_1],[y_2,x_2],[y_3,x_3],[y_4,x_4]]).astype(np.float32)
    pts_img_patch_perturb = np.array([[y_1_p,x_1_p],[y_2_p,x_2_p],[y_3_p,x_3_p],[y_4_p,x_4_p]]).astype(np.float32)
    h, status = cv2.findHomography(pts_img_patch, pts_img_patch_perturb, cv2.RANSAC)

    img_perburb = cv2.warpPerspective(img, h, (640, 480))
    img_perburb_patch = img_perburb[y_start:y_end, x_start:x_end]  # patch 2

    data.append(img_patch)
    data.append(img_perburb_patch)

    h_4pt = np.array([y_1_offset,x_1_offset,y_2_offset,x_2_offset,y_3_offset,x_3_offset,y_4_offset,x_4_offset])
    h1 = np.array([y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4])

    return data, h_4pt, h1, img, img_perburb
Project: tensorflow_homographynet    Author: linjian93    | project source | file source
def generate_data_train(img_path):
    data = []
    img = cv2.resize(cv2.imread(img_path, 0), (320, 240))

    y_start = random.randint(32, 80)
    y_end = y_start + 128
    x_start = random.randint(32, 160)
    x_end = x_start + 128

    y_1 = y_start
    x_1 = x_start
    y_2 = y_end
    x_2 = x_start
    y_3 = y_end
    x_3 = x_end
    y_4 = y_start
    x_4 = x_end

    img_patch = img[y_start:y_end, x_start:x_end]  # patch 1

    y_1_offset = random.randint(-32, 32)
    x_1_offset = random.randint(-32, 32)
    y_2_offset = random.randint(-32, 32)
    x_2_offset = random.randint(-32, 32)
    y_3_offset = random.randint(-32, 32)
    x_3_offset = random.randint(-32, 32)
    y_4_offset = random.randint(-32, 32)
    x_4_offset = random.randint(-32, 32)

    y_1_p = y_1 + y_1_offset
    x_1_p = x_1 + x_1_offset
    y_2_p = y_2 + y_2_offset
    x_2_p = x_2 + x_2_offset
    y_3_p = y_3 + y_3_offset
    x_3_p = x_3 + x_3_offset
    y_4_p = y_4 + y_4_offset
    x_4_p = x_4 + x_4_offset

    pts_img_patch = np.array([[y_1,x_1],[y_2,x_2],[y_3,x_3],[y_4,x_4]]).astype(np.float32)
    pts_img_patch_perturb = np.array([[y_1_p,x_1_p],[y_2_p,x_2_p],[y_3_p,x_3_p],[y_4_p,x_4_p]]).astype(np.float32)
    h, status = cv2.findHomography(pts_img_patch, pts_img_patch_perturb, cv2.RANSAC)

    img_perburb = cv2.warpPerspective(img, h, (320, 240))
    img_perburb_patch = img_perburb[y_start:y_end, x_start:x_end]  # patch 2

    data.append(img_patch)
    data.append(img_perburb_patch)

    h_4pt = np.array([y_1_offset,x_1_offset,y_2_offset,x_2_offset,y_3_offset,x_3_offset,y_4_offset,x_4_offset])
    h1 = np.array([y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4])

    return data, h_4pt, h1, img, img_perburb
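These generators encode each homography as eight corner offsets (h_4pt), the 4-point parameterization used by HomographyNet. The full 3x3 matrix can be recovered from a returned pair by refitting on the corner correspondences; a hedged sketch (note the generators order coordinates as (y, x), so the recovered matrix lives in that same swapped convention):

corners = h1.reshape(4, 2).astype(np.float32)      # original patch corners, (y, x)
perturbed = corners + h_4pt.reshape(4, 2).astype(np.float32)
H, _ = cv2.findHomography(corners, perturbed)      # default method: least squares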
Project: tensorflow_homographynet    Author: linjian93    | project source | file source
def generate_data(img_path):
    data_re = []
    label_re = []
    random_list = []
    img = cv2.resize(cv2.imread(img_path, 0), (320, 240))
    i = 1
    while i < pairs_per_img + 1:
        data = []
        label = []
        y_start = random.randint(32, 80)
        y_end = y_start + 128
        x_start = random.randint(32, 160)
        x_end = x_start + 128

        y_1 = y_start
        x_1 = x_start
        y_2 = y_end
        x_2 = x_start
        y_3 = y_end
        x_3 = x_end
        y_4 = y_start
        x_4 = x_end

        img_patch = img[y_start:y_end, x_start:x_end]  # patch 1

        y_1_offset = random.randint(-32, 32)
        x_1_offset = random.randint(-32, 32)
        y_2_offset = random.randint(-32, 32)
        x_2_offset = random.randint(-32, 32)
        y_3_offset = random.randint(-32, 32)
        x_3_offset = random.randint(-32, 32)
        y_4_offset = random.randint(-32, 32)
        x_4_offset = random.randint(-32, 32)

        y_1_p = y_1 + y_1_offset
        x_1_p = x_1 + x_1_offset
        y_2_p = y_2 + y_2_offset
        x_2_p = x_2 + x_2_offset
        y_3_p = y_3 + y_3_offset
        x_3_p = x_3 + x_3_offset
        y_4_p = y_4 + y_4_offset
        x_4_p = x_4 + x_4_offset

        pts_img_patch = np.array([[y_1,x_1],[y_2,x_2],[y_3,x_3],[y_4,x_4]]).astype(np.float32)
        pts_img_patch_perturb = np.array([[y_1_p,x_1_p],[y_2_p,x_2_p],[y_3_p,x_3_p],[y_4_p,x_4_p]]).astype(np.float32)
        h,status = cv2.findHomography(pts_img_patch, pts_img_patch_perturb, cv2.RANSAC)

        img_perburb = cv2.warpPerspective(img, h, (320, 240))
        img_perburb_patch = img_perburb[y_start:y_end, x_start:x_end]  # patch 2
        if [y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4] not in random_list:
            data.append(img_patch)
            data.append(img_perburb_patch)  # [2, 128, 128]
            random_list.append([y_1,x_1,y_2,x_2,y_3,x_3,y_4,x_4])
            h_4pt = np.array([y_1_offset,x_1_offset,y_2_offset,x_2_offset,y_3_offset,x_3_offset,y_4_offset,x_4_offset])
            # h_4pt = np.array([y_1_p,x_1_p,y_2_p,x_2_p,y_3_p,x_3_p,y_4_p,x_4_p])  # labels
            label.append(h_4pt)  # [1, 8]
            i += 1
            # append only for new corner sets, so duplicates do not add empty entries
            data_re.append(data)  # [pairs_per_img, 2, 128, 128]
            label_re.append(label)  # [pairs_per_img, 1, 8]

    return data_re, label_re
Project: Roomba980-Python    Author: NickWaterton    | project source | file source
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            # fall back to the OpenCV 2.x API
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not  enough  matches are found   -   %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image
Project: image_stacking    Author: maitek    | project source | file source
def stackImagesKeypointMatching(file_list):

    orb = cv2.ORB_create()

    # disable OpenCL because of a bug in ORB in OpenCV 3.1
    cv2.ocl.setUseOpenCL(False)

    stacked_image = None
    first_image = None
    first_kp = None
    first_des = None
    for file in file_list:
        print(file)
        image = cv2.imread(file,1)
        imageF = image.astype(np.float32) / 255

        # compute the descriptors with ORB
        kp = orb.detect(image, None)
        kp, des = orb.compute(image, kp)

        # create BFMatcher object
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        if first_image is None:
            # Save keypoints for first image
            stacked_image = imageF
            first_image = image
            first_kp = kp
            first_des = des
        else:
            # Find matches and sort them in the order of their distance
            matches = matcher.match(first_des, des)
            matches = sorted(matches, key=lambda x: x.distance)

            src_pts = np.float32(
                [first_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32(
                [kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

            # Estimate perspective transformation
            M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            h, w, _ = imageF.shape
            imageF = cv2.warpPerspective(imageF, M, (w, h))
            stacked_image += imageF

    stacked_image /= len(file_list)
    stacked_image = (stacked_image*255).astype(np.uint8)
    return stacked_image

# ===== MAIN =====
# Read all files in directory
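The script's entry point is truncated in this excerpt; a hedged sketch of a driver matching the comments above (argument handling and output filename are assumptions):

import os
import sys

if __name__ == '__main__':
    image_folder = sys.argv[1] if len(sys.argv) > 1 else '.'
    file_list = [os.path.join(image_folder, f)
                 for f in sorted(os.listdir(image_folder))
                 if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
    stacked = stackImagesKeypointMatching(file_list)
    cv2.imwrite('stacked.png', stacked)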
Project: papacamera    Author: 340StarObserver    | project source | file source
def matchFeatures(queryFeature, trainFeature, matcher):
    """
    match(...) function: match query image features and train image features.
    parameter:
        queryFeature: features of query image
        trainFeature: features of train image
        matcher:      feature matcher
        queryImage:   this is just for test to show the position of the found image which in the query image
                      , input query image data which has processed by cv2.imread().
    return:
        if found matched image ,return image name, otherwise return None.
    """
    queryKeypoints = queryFeature[0]
    queryDescriptors = queryFeature[1]

    trainKeypoints = trainFeature[0]
    trainDescriptors = trainFeature[1]
    trainImgSize = trainFeature[2]
    trainImgHeight = trainImgSize[0]
    trainImgWidth = trainImgSize[1]

    corners=numpy.float32([[0, 0], [trainImgWidth, 0], [trainImgWidth, trainImgHeight], [0, trainImgHeight]])
    raw_matches = matcher.knnMatch(trainDescriptors, queryDescriptors, 2)
    queryGoodPoints, trainGoodPoints = filter_matches(trainKeypoints, queryKeypoints, raw_matches)

    if len(queryGoodPoints) >= 4:
        H,status = cv2.findHomography(queryGoodPoints, trainGoodPoints, cv2.RANSAC, 5.0)
    else:
        H,status = None,None
    res=False
    obj_corners=None
    if H is not None:
        corners = corners.reshape(1, -1, 2)
        obj_corners = numpy.int32(cv2.perspectiveTransform(corners, H).reshape(-1, 2))
        is_polygon = ispolygon(obj_corners)
        if is_polygon:
            res=True
    del queryKeypoints
    del queryDescriptors
    del trainKeypoints
    del trainDescriptors
    del trainImgSize
    del corners
    del raw_matches
    del queryGoodPoints
    del trainGoodPoints
    del obj_corners
    return res
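filter_matches and ispolygon are helpers not shown in this excerpt. filter_matches likely follows the pattern of OpenCV's find_obj.py sample (Lowe's ratio test, then paired point arrays); a plausible sketch, with the return order chosen as an assumption to match how matchFeatures unpacks it:

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Lowe's ratio test over k=2 matches; kp1 is indexed by queryIdx,
    # kp2 by trainIdx, mirroring the knnMatch call above
    p1, p2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            p1.append(kp1[m[0].queryIdx].pt)
            p2.append(kp2[m[0].trainIdx].pt)
    # returned as (query-side, train-side) to match the caller's unpacking
    return numpy.float32(p2), numpy.float32(p1)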
Project: LearnHash    Author: galad-loth    | project source | file source
def TestKptMatch():    
    img1=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img1.ppm",cv2.IMREAD_COLOR)
    img2=cv2.imread("E:\\DevProj\\Datasets\\VGGAffine\\bark\\img2.ppm",cv2.IMREAD_COLOR)
    gray1=cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
    gray2=cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
    gap_width=20
    black_gap=npy.zeros((img1.shape[0],gap_width),dtype=npy.uint8)

#    objSIFT = cv2.SIFT(500)
#    kpt1,desc1 = objSIFT.detectAndCompute(gray1,None) 
#    kpt2,desc2 = objSIFT.detectAndCompute(gray2,None) 
#    objMatcher=cv2.BFMatcher(cv2.NORM_L2)
#    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    objORB = cv2.ORB(500)  # OpenCV 2.x API; on 3.x+ use cv2.ORB_create(500)
    kpt1,desc1 = objORB.detectAndCompute(gray1,None) 
    kpt2,desc2 = objORB.detectAndCompute(gray2,None) 
    objMatcher=cv2.BFMatcher(cv2.NORM_HAMMING)
    matches=objMatcher.knnMatch(desc1,desc2,k=2)

    goodMatches=[]
    for bm1,bm2 in matches:
        if bm1.distance < 0.7*bm2.distance:
            goodMatches.append(bm1)

    if len(goodMatches) <= 10:
        print("Not enough good matches: %d/10" % len(goodMatches))
        return
    ptsFrom = npy.float32([kpt1[bm.queryIdx].pt for bm in goodMatches]).reshape(-1,1,2)
    ptsTo = npy.float32([kpt2[bm.trainIdx].pt for bm in goodMatches]).reshape(-1,1,2)
    matH, matchMask = cv2.findHomography(ptsFrom, ptsTo, cv2.RANSAC, 5.0)
    matchMask = matchMask.ravel().tolist()

    imgcnb=npy.concatenate((gray1,black_gap,gray2),axis=1)

    plt.figure(1,figsize=(15,6))
    plt.imshow(imgcnb,cmap="gray")
    idx=0
    for bm in goodMatches:
        if 1==matchMask[idx]:
            kptFrom=kpt1[bm.queryIdx]
            kptTo=kpt2[bm.trainIdx]
            plt.plot(kptFrom.pt[0],kptFrom.pt[1],"rs",
                     markerfacecolor="none",markeredgecolor="r",markeredgewidth=2)
            plt.plot(kptTo.pt[0]+img1.shape[1]+gap_width,kptTo.pt[1],"bo",
                     markerfacecolor="none",markeredgecolor="b",markeredgewidth=2)
            plt.plot([kptFrom.pt[0],kptTo.pt[0]+img1.shape[1]+gap_width],
                     [kptFrom.pt[1],kptTo.pt[1]],"g-",linewidth=2)
        idx+=1
    plt.axis("off")