Python cv2 module: perspectiveTransform() example source code

The following 23 code examples, extracted from open-source Python projects, illustrate how to use cv2.perspectiveTransform().
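Before the project examples, a minimal self-contained sketch of the call itself may help: cv2.perspectiveTransform expects a float32 point array of shape (N, 1, 2) (or (1, N, 2)) together with a 3x3 matrix, and it maps points, not images (use cv2.warpPerspective for images). The corner values below are made up for illustration:

import cv2
import numpy as np

# homography mapping a 100x100 square onto an arbitrary quadrilateral
src = np.float32([[0, 0], [100, 0], [100, 100], [0, 100]])
dst = np.float32([[10, 20], [120, 30], [110, 140], [5, 120]])
M = cv2.getPerspectiveTransform(src, dst)

pts = src.reshape(-1, 1, 2)                # shape (4, 1, 2), dtype float32
warped = cv2.perspectiveTransform(pts, M)  # mapped points, same shape as pts
print(warped.reshape(-1, 2))               # prints the four target corners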

Project: LogoDetectionInVideo    Author: nmemme
def bf_knnmatches(matches, img, kp1, kp2):
    MIN_MATCH_COUNT = 10
    # store all the good matches as per Lowe's ratio test.
    good = []
    dst = []
    if matches and len(matches[0]) == 2:
        for m, n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            if M is not None:
                h, w = img.shape  # assumes a single-channel (grayscale) template image
                pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, M)
            else:
                dst = []
    else:
        dst = []
    return dst
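A hedged usage sketch for bf_knnmatches above; the ORB/BFMatcher setup and the file names 'template.png' and 'scene.png' are illustrative assumptions, not part of the original project:

import cv2
import numpy as np

img1 = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)  # template (single channel, as the function expects)
img2 = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

corners = bf_knnmatches(matches, img1, kp1, kp2)  # projected template corners in the scene, or []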
Project: esys-pbi    Author: fsxfreak
def draw_markers(img,markers):
    for m in markers:
        centroid = np.array(m['centroid'],dtype=np.float32)
        origin = np.array(m['verts'][0],dtype=np.float32)
        hat = np.array([[[0,0],[0,1],[.5,1.25],[1,1],[1,0]]],dtype=np.float32)
        hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
        if m['id_confidence']>.9:
            cv2.polylines(img,np.int0(hat),color = (0,0,255),isClosed=True)
        else:
            cv2.polylines(img,np.int0(hat),color = (0,255,0),isClosed=True)
        # cv2.polylines(img,np.int0(centroid),color = (255,255,int(255*m['id_confidence'])),isClosed=True,thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]),color=(0,0,0),thickness=-1)
        cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'opf_vel' in m:
            m_str = 'opf: {}'.format(m['opf_vel'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
Project: specularity-removal    Author: gmichaeljaison
def visualize_homo(img1, img2, kp1, kp2, matches, homo, mask):
    h, w, d = img1.shape
    pts = [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]
    pts = np.array(pts, dtype=np.float32).reshape((-1, 1, 2))
    dst = cv.perspectiveTransform(pts, homo)

    img2 = cv.polylines(img2, [np.int32(dst)], True, [255, 0, 0], 3, 8)

    matches_mask = mask.ravel().tolist()
    draw_params = dict(matchesMask=matches_mask,
                       singlePointColor=None,
                       matchColor=(0, 255, 0),
                       flags=2)
    res = cv.drawMatches(img1, kp1, img2, kp2, matches, None, **draw_params)
    return res
Project: esys-pbi    Author: fsxfreak
def get_mode_toggle(self,pos,img_shape):
        if self.detected and self.defined:
            x,y = pos
            frame = np.array([[[0,0],[1,0],[1,1],[0,1],[0,0]]],dtype=np.float32)
            frame = cv2.perspectiveTransform(frame,self.m_to_screen)
            text_anchor = frame.reshape((5,-1))[2]
            text_anchor[1] = 1-text_anchor[1]
            text_anchor *=img_shape[1],img_shape[0]
            text_anchor = text_anchor[0],text_anchor[1]-75
            surface_edit_anchor = text_anchor[0],text_anchor[1]+25
            marker_edit_anchor = text_anchor[0],text_anchor[1]+50
            if np.sqrt((x-surface_edit_anchor[0])**2 + (y-surface_edit_anchor[1])**2) <15:
                return 'surface_mode'
            elif np.sqrt((x-marker_edit_anchor[0])**2 + (y-marker_edit_anchor[1])**2) <15:
                return 'marker_mode'
            else:
                return None
        else:
            return None
Project: mrflow    Author: jswulff
def get_full_flow(u_res,v_res,H,x=None,y=None,shape=None):
    """ Adds homography back into flow

    Parameters: x,y,u,v,H
    """

    if (shape is None) and (x is None or y is None):
        raise ValueError('Please provide either x/y or the shape.')

    if x is None and y is None:
        x,y = np.meshgrid(np.arange(shape[1]),np.arange(shape[0]))

    x2 = x + u_res
    y2 = y + v_res
    pt2_ = np.c_[x2.ravel(),y2.ravel()].astype('float32')
    pt1_ = np.c_[x.ravel(),y.ravel()].astype('float32')
    uv_ = cv2.perspectiveTransform(pt2_[:,np.newaxis,:],H).squeeze() - pt1_
    return uv_[:,0].reshape(x.shape), uv_[:,1].reshape(x.shape)
Project: pyhiro    Author: wanweiwei07
def checkAvailability(sift, tkp, tdes, matchimg):
    """

    :param sift:
    :param tkp:
    :param tdes:sift feature object, template keypoints, and template descriptor
    :param matchimg:
    :return:
    """

    qimg = cv2.imread(matchimg)
    qimggray = cv2.cvtColor(qimg,cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5,5), np.uint8)
    # qimggray = cv2.erode(qimggray, kernel, iterations=1)
    # ret,threshimg = cv2.threshold(qimggray,100,255,cv2.THRESH_BINARY)
    qkp,qdes = sift.detectAndCompute(qimggray, None)
    # plt.imshow(threshimg, 'gray'), plt.show()

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    # FLANN_INDEX_LSH = 6
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=12,  # 12
    #                     key_size=20,  # 20
    #                     multi_probe_level=2)  # 2
    search_params = dict(checks = 50)
    flann=cv2.FlannBasedMatcher(index_params,search_params)
    matches=flann.knnMatch(tdes,qdes,k=2)
    goodMatch=[]
    for m_n in matches:
        if len(m_n) != 2:
            continue
        m, n = m_n
        if(m.distance<0.75*n.distance):
            goodMatch.append(m)
    MIN_MATCH_COUNT = 30
    if (len(goodMatch) >= MIN_MATCH_COUNT):
        tp = []
        qp = []

        for m in goodMatch:
            tp.append(tkp[m.queryIdx].pt)
            qp.append(qkp[m.trainIdx].pt)

        tp, qp = np.float32((tp, qp))
        H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)

        h = timg.shape[0]  # NOTE: timg (the template image) must be available in the enclosing scope
        w = timg.shape[1]
        trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
        queryBorder = cv2.perspectiveTransform(trainBorder, H)
        cv2.polylines(qimg, [np.int32(queryBorder)], True, (0, 255, 0), 5)
        cv2.imshow('result', qimg)
        plt.imshow(qimg, 'gray'), plt.show()
        return True
    else:
        print "Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT)
        return False
    # cv2.imshow('result', qimg)
    # if cv2.waitKey(10) == ord('q'):
    #     cv2.destroyAllWindows()
Project: esys-pbi    Author: fsxfreak
def build_correspondance(self, visible_markers,camera_calibration,min_marker_perimeter,min_id_confidence):
        """
        - use all visible markers
        - fit a convex quadrangle around it
        - use quadrangle verts to establish perspective transform
        - map all markers into surface space
        - build up list of found markers and their uv coords
        """

        all_verts = [m['verts'] for m in visible_markers if m['perimeter']>=min_marker_perimeter]
        if not all_verts:
            return
        all_verts = np.array(all_verts,dtype=np.float32)
        all_verts.shape = (-1,1,2) # [vert,vert,vert,vert,vert...] with vert = [[r,c]]
        # all_verts_undistorted_normalized centered in img center flipped in y and range [-1,1]
        all_verts_undistorted_normalized = cv2.undistortPoints(all_verts, camera_calibration['camera_matrix'],camera_calibration['dist_coefs']*self.use_distortion)
        hull = cv2.convexHull(all_verts_undistorted_normalized,clockwise=False)

        #simplify until we have exactly 4 verts
        if hull.shape[0]>4:
            new_hull = cv2.approxPolyDP(hull,epsilon=1,closed=True)
            if new_hull.shape[0]>=4:
                hull = new_hull
        if hull.shape[0]>4:
            curvature = abs(GetAnglesPolyline(hull,closed=True))
            most_acute_4_threshold = sorted(curvature)[3]
            hull = hull[curvature<=most_acute_4_threshold]


        # all_verts_undistorted_normalized space is flipped in y.
        # we need to change the order of the hull vertices
        hull = hull[[1,0,3,2],:,:]

        # now we need to roll the hull verts until we have the right orientation:
        # all_verts_undistorted_normalized space has its origin at the image center.
        # adding 1 to the coordinates puts the origin at the top left.
        distance_to_top_left = np.sqrt((hull[:,:,0]+1)**2+(hull[:,:,1]+1)**2)
        bot_left_idx = np.argmin(distance_to_top_left)+1
        hull = np.roll(hull,-bot_left_idx,axis=0)

        #based on these 4 verts we calculate the transformations into a 0,0 1,1 square space
        m_from_undistored_norm_space = m_verts_from_screen(hull)
        self.detected = True
        # map the markers vertices into the surface space (one can think of these as texture coordinates u,v)
        marker_uv_coords =  cv2.perspectiveTransform(all_verts_undistorted_normalized,m_from_undistored_norm_space)
        marker_uv_coords.shape = (-1,4,1,2) #[marker,marker...] marker = [ [[r,c]],[[r,c]] ]

        # build up a dict of discovered markers. Each with a history of uv coordinates
        for m, uv in zip(visible_markers, marker_uv_coords):
            try:
                self.markers[m['id']].add_uv_coords(uv)
            except KeyError:
                self.markers[m['id']] = Support_Marker(m['id'])
                self.markers[m['id']].add_uv_coords(uv)

        #average collection of uv correspondences across detected markers
        self.build_up_status = sum([len(m.collected_uv_coords) for m in self.markers.values()])/float(len(self.markers))

        if self.build_up_status >= self.required_build_up:
            self.finalize_correnspondance()
Project: esys-pbi    Author: fsxfreak
def img_to_ref_surface(self,pos):
        #convenience lines to allow 'simple' vectors (x,y) to be used
        shape = pos.shape
        pos.shape = (-1,1,2)
        new_pos = cv2.perspectiveTransform(pos,self.m_from_screen )
        new_pos.shape = shape
        return new_pos
Project: esys-pbi    Author: fsxfreak
def ref_surface_to_img(self,pos):
        #convenience lines to allow 'simple' vectors (x,y) to be used
        shape = pos.shape
        pos.shape = (-1,1,2)
        new_pos = cv2.perspectiveTransform(pos,self.m_to_screen )
        new_pos.shape = shape
        return new_pos
Project: esys-pbi    Author: fsxfreak
def map_datum_to_surface(d,m_from_screen):
        pos = np.array([d['norm_pos']]).reshape(1,1,2)
        mapped_pos = cv2.perspectiveTransform(pos , m_from_screen )
        mapped_pos.shape = (2)
        on_srf = bool((0 <= mapped_pos[0] <= 1) and (0 <= mapped_pos[1] <= 1))
        return {'topic':d['topic']+"_on_surface",'norm_pos':(mapped_pos[0],mapped_pos[1]),'confidence':d['confidence'],'on_srf':on_srf,'base_data':d }
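A minimal hypothetical call to map_datum_to_surface, using an identity homography so the datum maps onto the surface unchanged:

import numpy as np

d = {'topic': 'gaze', 'norm_pos': (0.25, 0.75), 'confidence': 1.0}
result = map_datum_to_surface(d, np.eye(3))
print(result['norm_pos'], result['on_srf'])  # -> (0.25, 0.75) True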
Project: esys-pbi    Author: fsxfreak
def move_vertex(self,vert_idx,new_pos):
        """
        this fn is used to manipulate the surface boundary (coordinate system)
        new_pos is in uv-space coords
        if we move one vertex of the surface we need to find
        the transformation from the old quadrangle to the new quadrangle
        and apply that transformation to our marker uv-coords
        """
        before = marker_corners_norm
        after = before.copy()
        after[vert_idx] = new_pos
        transform = cv2.getPerspectiveTransform(after,before)
        for m in self.markers.values():
            m.uv_coords = cv2.perspectiveTransform(m.uv_coords,transform)
Project: esys-pbi    Author: fsxfreak
def gl_draw_corners(self):
        """
        draw surface and markers
        """
        if self.detected:
            frame = cv2.perspectiveTransform(marker_corners_norm.reshape(-1,1,2),self.m_to_screen)
            draw_points_norm(frame.reshape((4,2)),20,RGBA(1.0,0.2,0.6,.5))



    #### fns to draw surface in separate window
Project: esys-pbi    Author: fsxfreak
def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        if self.mode == "Show Markers and Surfaces":
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[.5,1.3],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                if m['perimeter']>=self.min_marker_perimeter and m['id_confidence']>self.min_id_confidence:
                    draw_polyline(hat.reshape((6,2)),color=RGBA(0.1,1.,1.,.5))
                    draw_polyline(hat.reshape((6,2)),color=RGBA(0.1,1.,1.,.3),line_type=GL_POLYGON)
                else:
                    draw_polyline(hat.reshape((6,2)),color=RGBA(0.1,1.,1.,.5))

            for s in self.surfaces:
                if s not in self.edit_surfaces and s is not self.marker_edit_surface:
                    s.gl_draw_frame(self.img_shape)

            for s in self.edit_surfaces:
                s.gl_draw_frame(self.img_shape,highlight=True,surface_mode=True)
                s.gl_draw_corners()

            if self.marker_edit_surface:
                inc = []
                exc = []
                for m in self.markers:
                    if m['perimeter']>=self.min_marker_perimeter:
                        if m['id'] in self.marker_edit_surface.markers:
                            inc.append(m['centroid'])
                        else:
                            exc.append(m['centroid'])
                draw_points(exc,size=20,color=RGBA(1.,0.5,0.5,.8))
                draw_points(inc,size=20,color=RGBA(0.5,1.,0.5,.8))
                self.marker_edit_surface.gl_draw_frame(self.img_shape,color=(0.0,0.9,0.6,1.0),highlight=True,marker_mode=True)

        for s in self.surfaces:
            if self.locate_3d:
                s.gl_display_in_window_3d(self.g_pool.image_tex,self.camera_calibration)
            else:
                s.gl_display_in_window(self.g_pool.image_tex)
Project: srcsim2017    Author: ZarjRobotics
def do_unwarp(self, where):
        """ do the opencv2 unwarp """
        point = np.array([[where[0], where[1]]], dtype='float32')
        apoint = np.array([point])
        turn = cv2.perspectiveTransform(apoint, self.map_matrix)
        return self.m_row_zero + (turn[0][0][1] * self.m_per_row)
Project: imgProcessor    Author: radjkarl
def correctPoints(self, pts):
        if not self._homography_is_fixed:
            self._homography = None
        h = self._homography
        if pts.ndim == 2:
            pts = pts.reshape(1, *pts.shape)
        return cv2.perspectiveTransform(pts.astype(np.float32), h)
Project: imgProcessor    Author: radjkarl
def quadFromH(self):
        sy, sx = self.img.shape[:2]
        # image edges:
        objP = np.array([[
            [0, 0],
            [sx, 0],
            [sx, sy],
            [0, sy],
        ]], dtype=np.float32)
        return cv2.perspectiveTransform(objP, self._Hinv)
Project: football-stats    Author: dev-labs-bg
def windowToFieldCoordinates(basePoint, x1, y1, x2, y2, x3, y3, x4, y4, maxWidth=0, maxHeight=0):
    (xp, yp) = basePoint
    src = np.array([
        [x1, y1],
        [x2, y2],
        [x3, y3],
        [x4, y4]], dtype = "float32")

    # these defaults should preserve the aspect ratio of the real field width/height
    maxWidth = (x4-x1) if maxWidth == 0 else maxWidth
    maxHeight = (y1-y2) if maxHeight == 0 else maxHeight

    # make a destination rectangle with the width and height of above (starts at 0,0)
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # find the transformation matrix for our transforms
    transformationMatrix = cv2.getPerspectiveTransform(src, dst)

    # cv2.perspectiveTransform expects a float32 array of shape (1, N, 2); a single point suffices
    original = np.array([[(xp, yp)]], dtype=np.float32)

    # use perspectiveTransform to transform our original(mouse coords) to new coords with the transformation matrix
    transformed = cv2.perspectiveTransform(original, transformationMatrix)[0][0]

    return transformed
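An illustrative call to windowToFieldCoordinates; the corner coordinates below are invented, ordered top-left, top-right, bottom-right, bottom-left to match the destination rectangle built inside the function:

point_on_field = windowToFieldCoordinates(
    (400, 300),                  # mouse position in window coordinates
    100, 50, 700, 60,            # x1, y1, x2, y2 (top-left, top-right)
    750, 500, 50, 480,           # x3, y3, x4, y4 (bottom-right, bottom-left)
    maxWidth=105, maxHeight=68)  # e.g. a football pitch in meters
print(point_on_field)            # (x, y) in field coordinates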
Project: mrflow    Author: jswulff
def get_residual_flow(x,y,u,v,H):
    """ Removes homography from flow

    Parameters: x,y,u,v,H.
    """
    x2 = x + u
    y2 = y + v
    pt2_ = np.c_[x2.ravel(),y2.ravel()].astype('float32')
    pt1_ = np.c_[x.ravel(),y.ravel()].astype('float32')

    Hinv = np.linalg.inv(H)
    uv_ = cv2.perspectiveTransform(pt2_[:,np.newaxis,:],Hinv).squeeze() - pt1_
    return uv_[:,0].reshape(x.shape), uv_[:,1].reshape(x.shape)
Project: mrflow    Author: jswulff
def homography2flow(x,y,H):
    """ Convert homography to flow field

    """
    pt1_ = np.c_[x.ravel(),y.ravel()].astype('float32')
    uv = cv2.perspectiveTransform(pt1_[:,np.newaxis,:],H).squeeze() - pt1_

    return uv[:,0].reshape(x.shape), uv[:,1].reshape(x.shape)
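homography2flow and get_residual_flow above are inverse operations, which allows a quick sanity check; a sketch assuming both functions (and numpy/cv2) are in scope:

import numpy as np

x, y = np.meshgrid(np.arange(8, dtype=np.float32), np.arange(6, dtype=np.float32))
H = np.array([[1.0,  0.02,  3.0],
              [0.01, 1.0,  -2.0],
              [1e-4, 0.0,   1.0]])

u, v = homography2flow(x, y, H)            # flow induced by H alone
ru, rv = get_residual_flow(x, y, u, v, H)  # removing H should leave ~zero flow
assert np.allclose(ru, 0, atol=1e-3) and np.allclose(rv, 0, atol=1e-3)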
Project: mrflow    Author: jswulff
def ptransform(xy, M):
    """ Just a small wrapper to cv2.perspectiveTransform
    """
    if xy.shape[1] == 2:
        return cv2.perspectiveTransform(xy[:,np.newaxis,:], M).squeeze()
    else:
        return cv2.perspectiveTransform(xy.T[:,np.newaxis,:], M).squeeze().T
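A short usage sketch for ptransform, showing that it accepts points either as rows (N, 2) or as columns (2, N) and preserves the layout:

import numpy as np

M = np.eye(3)
pts_rows = np.random.rand(10, 2).astype(np.float32)  # points as rows: (N, 2)
out = ptransform(pts_rows, M)                        # returns (N, 2)

pts_cols = pts_rows.T                                # points as columns: (2, N)
out_t = ptransform(pts_cols, M)                      # returns (2, N)
assert np.allclose(out, out_t.T)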
Project: esys-pbi    Author: fsxfreak
def gl_draw_frame(self,img_size,color = (1.0,0.2,0.6,1.0),highlight=False,surface_mode=False,marker_mode=False):
        """
        draw surface and markers
        """
        if self.detected:
            r,g,b,a = color
            frame = np.array([[[0,0],[1,0],[1,1],[0,1],[0,0]]],dtype=np.float32)
            hat = np.array([[[.3,.7],[.7,.7],[.5,.9],[.3,.7]]],dtype=np.float32)
            hat = cv2.perspectiveTransform(hat,self.m_to_screen)
            frame = cv2.perspectiveTransform(frame,self.m_to_screen)
            alpha = min(1,self.build_up_status/self.required_build_up)
            if highlight:
                draw_polyline_norm(frame.reshape((5,2)),1,RGBA(r,g,b,a*.1),line_type=GL_POLYGON)
            draw_polyline_norm(frame.reshape((5,2)),1,RGBA(r,g,b,a*alpha))
            draw_polyline_norm(hat.reshape((4,2)),1,RGBA(r,g,b,a*alpha))
            text_anchor = frame.reshape((5,-1))[2]
            text_anchor[1] = 1-text_anchor[1]
            text_anchor *=img_size[1],img_size[0]
            text_anchor = text_anchor[0],text_anchor[1]-75
            surface_edit_anchor = text_anchor[0],text_anchor[1]+25
            marker_edit_anchor = text_anchor[0],text_anchor[1]+50
            if self.defined:
                if marker_mode:
                    draw_points([marker_edit_anchor],color=RGBA(0,.8,.7))
                else:
                    draw_points([marker_edit_anchor])
                if surface_mode:
                    draw_points([surface_edit_anchor],color=RGBA(0,.8,.7))
                else:
                    draw_points([surface_edit_anchor])

                self.glfont.set_blur(3.9)
                self.glfont.set_color_float((0,0,0,.8))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'edit surface')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,'add/remove markers')
                self.glfont.set_blur(0.0)
                self.glfont.set_color_float((0.1,8.,8.,.9))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'edit surface')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,'add/remove markers')
            else:
                progress = (self.build_up_status/float(self.required_build_up))*100
                progress_text = '%.0f%%'%progress
                self.glfont.set_blur(3.9)
                self.glfont.set_color_float((0,0,0,.8))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'Learning affiliated markers...')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,progress_text)
                self.glfont.set_blur(0.0)
                self.glfont.set_color_float((0.1,8.,8.,.9))
                self.glfont.draw_text(text_anchor[0]+15,text_anchor[1]+6,self.marker_status())
                self.glfont.draw_text(surface_edit_anchor[0]+15,surface_edit_anchor[1]+6,'Learning affiliated markers...')
                self.glfont.draw_text(marker_edit_anchor[0]+15,marker_edit_anchor[1]+6,progress_text)
Project: AR-BXT-AR4Python    Author: GeekLiB
def getMatches(self, sceneImage):
        """
        sceneImage: ?????array??

        return dst: ????????

        """
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(self.MarkImage[:,:,0],None)
        kp2, des2 = sift.detectAndCompute(sceneImage[:,:,0],None)

        # create FLANN matcher
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # Match descriptors.
        matches = flann.knnMatch(des1,des2,k=2)

        # Keep only good matches using Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
        if len(good) < self.MIN_MATCH_COUNT:
            return None

        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
        matchesMask = mask.ravel().tolist()

        h,w = self.MarkImage.shape[:2]
        pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,M)

        draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                           singlePointColor = None,
                           matchesMask = matchesMask, # draw only inliers
                           flags = 2)

        self.SceneImage = sceneImage
        self.DrawParams = draw_params
        self.KP1 = kp1
        self.KP2 = kp2
        self.GoodMatches = good
        return dst
Project: papacamera    Author: 340StarObserver
def matchFeatures(queryFeature, trainFeature, matcher):
    """
    match(...) function: match query image features and train image features.
    parameter:
        queryFeature: features of query image
        trainFeature: features of train image
        matcher:      feature matcher
        queryImage:   this is just for test to show the position of the found image which in the query image
                      , input query image data which has processed by cv2.imread().
    return:
        if found matched image ,return image name, otherwise return None.
    """
    queryKeypoints = queryFeature[0]
    queryDescriptors = queryFeature[1]

    trainKeypoints = trainFeature[0]
    trainDescriptors = trainFeature[1]
    trainImgSize = trainFeature[2]
    trainImgHeight = trainImgSize[0]
    trainImgWidth = trainImgSize[1]

    corners=numpy.float32([[0, 0], [trainImgWidth, 0], [trainImgWidth, trainImgHeight], [0, trainImgHeight]])
    raw_matches = matcher.knnMatch(trainDescriptors, queryDescriptors, 2)
    queryGoodPoints, trainGoodPoints = filter_matches(trainKeypoints, queryKeypoints, raw_matches)

    if len(queryGoodPoints) >= 4:
        H,status = cv2.findHomography(queryGoodPoints, trainGoodPoints, cv2.RANSAC, 5.0)
    else:
        H,status = None,None
    res=False
    obj_corners=None
    if H is not None:
        corners = corners.reshape(1, -1, 2)
        obj_corners = numpy.int32(cv2.perspectiveTransform(corners, H).reshape(-1, 2))
        is_polygon = ispolygon(obj_corners)
        if is_polygon:
            res=True
    del queryKeypoints
    del queryDescriptors
    del trainKeypoints
    del trainDescriptors
    del trainImgSize
    del corners
    del raw_matches
    del queryGoodPoints
    del trainGoodPoints
    del obj_corners
    return res
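matchFeatures depends on two helpers not included in this excerpt: filter_matches and ispolygon. Below is a plausible reconstruction of filter_matches, modeled on OpenCV's find_obj.py sample and offered only as an assumption; the project's actual implementation may differ (ispolygon, which presumably checks that the projected corners form a valid quadrilateral, is not reconstructed here):

import numpy

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Lowe's ratio test: keep a match only if its best distance is clearly
    # smaller than the second-best. Returns matched point arrays for both images.
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            mkp1.append(kp1[m[0].queryIdx])
            mkp2.append(kp2[m[0].trainIdx])
    p1 = numpy.float32([kp.pt for kp in mkp1])
    p2 = numpy.float32([kp.pt for kp in mkp2])
    return p1, p2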