Python cv2 module, polylines() example source code

We extracted the following 31 code examples from open-source Python projects to illustrate how to use cv2.polylines().
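
Before diving into the project snippets, here is a minimal, self-contained sketch of the basic call signature (the canvas and points below are made up for illustration and are not taken from any of the projects):

import numpy as np
import cv2

# A blank black canvas (hypothetical example image)
canvas = np.zeros((200, 200, 3), dtype=np.uint8)

# cv2.polylines expects a list of int32 point arrays of shape (n, 1, 2)
pts = np.array([[20, 20], [180, 40], [160, 160], [40, 180]], np.int32).reshape((-1, 1, 2))

# isClosed=True joins the last point back to the first; color is in BGR order
cv2.polylines(canvas, [pts], isClosed=True, color=(0, 255, 0), thickness=2)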

Project: diy_driverless_car_ROS    Author: wilselby    | project source | file source
def render_lane(image, corners, ploty, fitx):

    _,  src,  dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx,ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp,  [pts],  False,  (0, 255, 0),  10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result
Project: MultiObjectTracker    Author: alokwhitewolf    | project source | file source
def get_points(event,x,y,flags,param):
    global lpnts,rpnts

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))



    if event == cv2.EVENT_RBUTTONDOWN:
        rpnts = np.append(rpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [rpnts], False, (255, 0, 0))

        if rpnts.size>2:
            check(lpnts, rpnts[-1], rpnts[-2])



#check if the new point crosses a line
Project: masks-and-hats    Author: leoneckert    | project source | file source
def drawlines(img, points):
    filler = cv2.convexHull(points)
    cv2.polylines(img, filler, True, (0, 0, 0), thickness=2)
    return img
Project: MultiObjectTracker    Author: alokwhitewolf    | project source | file source
def get_points(event, x, y, flags, param):
    global lpnts, mode, counter, which_intersect

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))
        if lpnts.size > 2:
            if mode == 0:

                #check(l1, lpnts[-1], lpnts[-2])
                if check(l1, lpnts[-1], lpnts[-2]):
                    which_intersect = 0
                    mode = 1
                #check(l2, lpnts[-1], lpnts[-2])
                if check(l2, lpnts[-1], lpnts[-2]):
                    which_intersect = 1
                    mode = 1

            elif mode == 1:

                counter += 1
                if check(lines[(which_intersect + 1) % 2], lpnts[-1], lpnts[-2]):
                    mode = 3
                    print counter


# check if the new point crosses a line
Project: pybot    Author: spillai    | project source | file source
def visualize(self, vis, colored=True): 

        try: 
            tids = set(self.ids)
        except: 
            return vis

        for hid, hbox in izip(self.ids, self.bboxes): 
            cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0,255,0), 1)

        vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)

        # for tid, pts in self.tm_.tracks.iteritems(): 
        #     if tid not in tids: continue
        #     cv2.polylines(vis, [np.vstack(pts.items).astype(np.int32)[-4:]], False, 
        #                   (0,255,0), thickness=1)
        #     tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
        #     cv2.rectangle(vis, (tl[0], tl[1]), (br[0], br[1]), (0,255,0), -1)

        # OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
        return vis
Project: spikefuel    Author: duguyue100    | project source | file source
def draw_poly_box(frame, pts, color=[0, 255, 0]):
    """Draw polylines bounding box.

    Parameters
    ----------
    frame : OpenCV Mat
        A given frame with an object
    pts : numpy array
        consists of bounding box information with size (n points, 2)
    color : list
        color of the bounding box, the default is green

    Returns
    -------
    new_frame : OpenCV Mat
        A frame with given bounding box.
    """
    new_frame = frame.copy()
    temp_pts = np.array(pts, np.int32)
    temp_pts = temp_pts.reshape((-1, 1, 2))
    cv2.polylines(new_frame, [temp_pts], True, color, thickness=2)

    return new_frame
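
A minimal usage sketch for draw_poly_box (the frame and box values are made up for illustration):

import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)     # hypothetical blank BGR frame
box = [[10, 10], [300, 10], [300, 200], [10, 200]]  # four corners, shape (n points, 2)
boxed = draw_poly_box(frame, box)                   # returns a copy; `frame` itself is left untouched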
Project: esys-pbi    Author: fsxfreak    | project source | file source
def update(self,frame,events):
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]
        bgra = (self.b*255,self.g*255,self.r*255,self.a*255)
        if pts:
            pts = np.array([pts],dtype=np.int32)
            cv2.polylines(frame.img, pts, isClosed=False, color=bgra, thickness=self.thickness, lineType=cv2.LINE_AA)
Project: pe    Author: anguelos    | project source | file source
def plotRectangles(rects,transcriptions,bgrImg,rgbCol):
    bgrCol=np.array(rgbCol)[[2,1,0]]
    res=bgrImg.copy()
    pts=np.empty([rects.shape[0],5,1,2])
    if rects.shape[1]==4:
        x=rects[:,[0,2,2,0,0]]
        y=rects[:,[1,1,3,3,1]]
    elif rects.shape[1]==8:
        x=rects[:,[0,2,4,6,0]]
        y=rects[:,[1,3,5,7,1]]
    else:
        raise Exception()
    pts[:,:,0,0]=x
    pts[:,:,0,1]=y
    pts=pts.astype('int32')
    ptList=[pts[k,:,:,:] for k in range(pts.shape[0])]
    if not (transcriptions is None):
        for rectNum in range(rects.shape[0]):
            res=cv2.putText(res,transcriptions[rectNum],(rects[rectNum,0],rects[rectNum,1]),1,cv2.FONT_HERSHEY_PLAIN,bgrCol)
    res=cv2.polylines(res,ptList,False,bgrCol)
    return res
Project: emojivis    Author: JustinShenk    | project source | file source
def draw_polyline(im, landmarks):
    if faceOnly:
        print("faceOnly on")
        im[0:screenheight] = 0.0
    pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
    pts = pts.reshape((-1, 1, 2))
    if len(landmarks):
        bottomRight = (max(landmarks[0][:, 0]), max(landmarks[0][:, 1]))
        topLeft = (min(landmarks[0][:, 0]), min(landmarks[0][:, 1]))
        rect = (0, 0, im.shape[1], im.shape[0])
        # FIXME: Delaunay triangulation.
        # subdiv = cv2.Subdiv2D(rect)
    for landmark in landmarks:
        for group in OVERLAY_GROUPS:
            ftrpoints = [landmark[group]]
            cv2.polylines(im, ftrpoints, False, (0, 255, 0), 1, 8)
            # FIXME: Delaunay triangulation.
            # for pt in ftrpoints:
            #     subdiv.insert(pt)
    # FIXME: Delaunay triangulation.
    # if len(landmarks):
    #     draw_delaunay( im, subdiv, (255,255,2555))
    return im
Project: motion-capture    Author: Vermeille    | project source | file source
def draw_landmarks(frame, lds):
    #cv2.rectangle(frame, (0, 0), (frame.shape[1], frame.shape[0]), (0, 0, 0), -1)
    # Make the eyebrows into a nightmare
    cv2.fillPoly(frame, [lds.get('left_eyebrow')], (68, 54, 39, 128))
    cv2.fillPoly(frame, [lds.get('right_eyebrow')], (68, 54, 39, 128))

    cv2.fillPoly(frame, [lds.get('left_eye')], (150, 54, 39, 128))
    cv2.fillPoly(frame, [lds.get('right_eye')], (150, 54, 39, 128))

    # Gloss the lips
    cv2.fillPoly(frame, [lds.get('top_lip')], (150, 0, 0, 128))
    cv2.fillPoly(frame, [lds.get('bottom_lip')], (150, 0, 0, 128))

    cv2.fillPoly(frame, [lds.get('nose_bridge')], (150, 0, 0, 128))
    cv2.fillPoly(frame, [lds.get('nose_tip')], (150, 0, 0, 128))
    cv2.polylines(frame, [lds.get('chin')], False, (150, 0, 0, 128))
Project: pybot    Author: spillai    | project source | file source
def draw_hulls(im, hulls): 
    assert(isinstance(hulls, list))
    cv2.polylines(im, map(lambda hull: hull.astype(np.int32), hulls), 1, (0, 255, 0) if im.ndim == 3 else 255, thickness=1)       
    return im
Project: pybot    Author: spillai    | project source | file source
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        N = 20
        # inds = self.confident_tracks(min_length=min_track_length)
        # if not len(inds): 
        #     return

        # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
        # lengths = self.tm_.lengths[inds]

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, pts in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()): 
            cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
Project: esys-pbi    Author: fsxfreak    | project source | file source
def update(self,frame,events):
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]
        bgra = (self.b*255,self.g*255,self.r*255,self.a*255)
        for pt in pts:
            lines =  np.array( [((pt[0]-self.inner,pt[1]),(pt[0]-self.outer,pt[1])),((pt[0]+self.inner,pt[1]),(pt[0]+self.outer,pt[1])) , ((pt[0],pt[1]-self.inner),(pt[0],pt[1]-self.outer)) , ((pt[0],pt[1]+self.inner),(pt[0],pt[1]+self.outer))],dtype=np.int32 )
            cv2.polylines(frame.img, lines, isClosed=False, color=bgra, thickness=self.thickness, lineType=cv2.LINE_AA)
Project: esys-pbi    Author: fsxfreak    | project source | file source
def draw_markers(img,markers):
    for m in markers:
        centroid = np.array(m['centroid'],dtype=np.float32)
        origin = np.array(m['verts'][0],dtype=np.float32)
        hat = np.array([[[0,0],[0,1],[.5,1.25],[1,1],[1,0]]],dtype=np.float32)
        hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
        if m['id_confidence']>.9:
            cv2.polylines(img,np.int0(hat),color = (0,0,255),isClosed=True)
        else:
            cv2.polylines(img,np.int0(hat),color = (0,255,0),isClosed=True)
        # cv2.polylines(img,np.int0(centroid),color = (255,255,int(255*m['id_confidence'])),isClosed=True,thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]),color=(0,0,0),thickness=-1)
        cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'opf_vel' in m:
            m_str = 'otf: {}'.format(m['opf_vel'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
Project: PyIntroduction    Author: tody411    | project source | file source
def drawPolylines(img, pts, isClosed, color, width):
    img = cv2.polylines(img, pts, isClosed, color, width, cv2.LINE_AA)
Project: python-examples-cv    Author: tobybreckon    | project source | file source
def draw_flow(img, flow, step=8):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis

#####################################################################

# define video capture object
Project: mask-generator    Author: antiboredom    | project source | file source
def drawlines(img, points):
    filler = cv2.convexHull(points)
    cv2.polylines(img, filler, True, (0, 0, 0), thickness=2)
    return img
Project: FindYourCandy    Author: BrainPad    | project source | file source
def draw_detection(image):
    candies = detector.detect(image)
    for candy in candies:
        cv2.polylines(image, np.int32([np.array(candy.box_coords)]), isClosed=True, color=(0, 0, 255),
                      lineType=cv2.LINE_AA, thickness=3)
Project: CarLaneDetection    Author: leftthomas    | project source | file source
def draw_roi(img, vertices):
    cv2.polylines(img, vertices, True, [255, 0, 0], thickness=2)
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def addRectangulars(self, frame_from, corners_arr):
        add_frame = np.zeros(frame_from.shape, np.uint8) 
        cv2.polylines(add_frame,[corners_arr],True,(0,255,255))
        frame = cv2.add(add_frame, frame_from)
        return frame
Project: pybot    Author: spillai    | project source | file source
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
Project: specularity-removal    Author: gmichaeljaison    | project source | file source
def visualize_homo(img1, img2, kp1, kp2, matches, homo, mask):
    h, w, d = img1.shape
    pts = [[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]
    pts = np.array(pts, dtype=np.float32).reshape((-1, 1, 2))
    dst = cv.perspectiveTransform(pts, homo)

    img2 = cv.polylines(img2, [np.int32(dst)], True, [255, 0, 0], 3, 8)

    matches_mask = mask.ravel().tolist()
    draw_params = dict(matchesMask=matches_mask,
                       singlePointColor=None,
                       matchColor=(0, 255, 0),
                       flags=2)
    res = cv.drawMatches(img1, kp1, img2, kp2, matches, None, **draw_params)
    return res
Project: Two-Stream-Convolutional-Networks    Author: Yorwxue    | project source | file source
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)  # sample a grid point every `step` pixels and flatten into two index rows
    fx, fy = flow[y, x].T  # flow components at the sampled grid points
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)  # stack start and end points into (n, 2, 2) line segments
    lines = np.int32(lines + 0.5)  # round to the nearest integer pixel
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))  # draw each flow segment in green
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)  # mark each segment origin with a filled dot
    return vis
Project: page_dewarp    Author: mzucker    | project source | file source
def visualize_span_points(name, small, span_points, corners):

    display = small.copy()

    for i, points in enumerate(span_points):

        points = norm2pix(small.shape, points, False)

        mean, small_evec = cv2.PCACompute(points.reshape((-1, 2)),
                                          None,
                                          maxComponents=1)

        dps = np.dot(points.reshape((-1, 2)), small_evec.reshape((2, 1)))
        dpm = np.dot(mean.flatten(), small_evec.flatten())

        point0 = mean + small_evec * (dps.min()-dpm)
        point1 = mean + small_evec * (dps.max()-dpm)

        for point in points:
            cv2.circle(display, fltp(point), 3,
                       CCOLORS[i % len(CCOLORS)], -1, cv2.LINE_AA)

        cv2.line(display, fltp(point0), fltp(point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    cv2.polylines(display, [norm2pix(small.shape, corners, True)],
                  True, (255, 255, 255))

    debug_show(name, 3, 'span points', display)
Project: Recognition    Author: thautwarm    | project source | file source
def deal(self,frame):
        frame=frame.copy()
        track_window=self.track_window
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        roi_hist=self.roi_hist 
        dst = cv2.calcBackProject([frame],[0],roi_hist,[0,180],1)
        if self.m=='m':
            ret, track_window_r = cv2.meanShift(dst, track_window, term_crit)
            x,y,w,h = track_window_r
            img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        elif self.m=='c':
            ret, track_window_r = cv2.CamShift(dst, track_window, term_crit)


            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            img2 = cv2.polylines(frame,[pts],True, 255,2)
        rectsNew=[]

        center1=(track_window[0]+track_window[2]//2,track_window[1]+track_window[3]//2)
        center2=(track_window_r[0]+track_window_r[2]//2,track_window_r[1]+track_window_r[3]//2)
        img2 = cv2.line(img2,center1,center2,color=0)
        rectsNew=track_window_r
#        x,y,w,h = track_window
#        img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        cv2.imshow('img2',img2)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
        return rectsNew
Project: pynephoscope    Author: neXyon    | project source | file source
def renderStarGauss(image, cov, mu, first, scale = 5):
    num_circles = 3
    num_points = 64

    cov = sqrtm(cov)

    num = num_circles * num_points
    pos = np.ones((num, 2))

    for c in range(num_circles):
        r = c + 1
        for p in range(num_points):
            angle = p / num_points * 2 * np.pi
            index = c * num_points + p

            x = r * np.cos(angle)
            y = r * np.sin(angle)

            pos[index, 0] = x * cov[0, 0] + y * cov[0, 1] + mu[0]
            pos[index, 1] = x * cov[1, 0] + y * cov[1, 1] + mu[1]

    #image = image.copy()
    #image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    if first:
        image = cv2.resize(image, (0, 0), None, scale, scale, cv2.INTER_NEAREST)

    for c in range(num_circles):
        pts = np.array(pos[c * num_points:(c + 1) * num_points, :] * scale + scale / 2, np.int32)
        pts = pts.reshape((-1,1,2))
        cv2.polylines(image, [pts], True, (255, 0, 0))

    return image
Project: pyhiro    Author: wanweiwei07    | project source | file source
def checkAvailability(sift, tkp, tdes, matchimg):
    """

    :param sift:
    :param tkp:
    :param tdes:sift feature object, template keypoints, and template descriptor
    :param matchimg:
    :return:
    """

    qimg = cv2.imread(matchimg)
    qimggray = cv2.cvtColor(qimg,cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5,5), np.uint8)
    # qimggray = cv2.erode(qimggray, kernel, iterations=1)
    # ret,threshimg = cv2.threshold(qimggray,100,255,cv2.THRESH_BINARY)
    qkp,qdes = sift.detectAndCompute(qimggray, None)
    # plt.imshow(threshimg, 'gray'), plt.show()

    FLANN_INDEX_KDITREE=0
    index_params=dict(algorithm=FLANN_INDEX_KDITREE,tree=5)
    # FLANN_INDEX_LSH = 6
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=12,  # 12
    #                     key_size=20,  # 20
    #                     multi_probe_level=2)  # 2
    search_params = dict(checks = 50)
    flann=cv2.FlannBasedMatcher(index_params,search_params)
    matches=flann.knnMatch(tdes,qdes,k=2)
    goodMatch=[]
    for m_n in matches:
        if len(m_n) != 2:
            continue
        m, n = m_n
        if(m.distance<0.75*n.distance):
            goodMatch.append(m)
    MIN_MATCH_COUNT = 30
    if (len(goodMatch) >= MIN_MATCH_COUNT):
        tp = []
        qp = []

        for m in goodMatch:
            tp.append(tkp[m.queryIdx].pt)
            qp.append(qkp[m.trainIdx].pt)

        tp, qp = np.float32((tp, qp))
        H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)

        h = timg.shape[0]
        w = timg.shape[1]
        trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
        queryBorder = cv2.perspectiveTransform(trainBorder, H)
        cv2.polylines(qimg, [np.int32(queryBorder)], True, (0, 255, 0), 5)
        cv2.imshow('result', qimg)
        plt.imshow(qimg, 'gray'), plt.show()
        return True
    else:
        print "Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT)
        return False
    # cv2.imshow('result', qimg)
    # if cv2.waitKey(10) == ord('q'):
    #     cv2.destroyAllWindows()
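
A hedged usage sketch for checkAvailability. Note that the function also reads a module-level template image timg (for its width and height); the file names and the SIFT constructor below are assumptions, not taken from the project:

import cv2

# (the original module also imports numpy as np and matplotlib.pyplot as plt)
sift = cv2.xfeatures2d.SIFT_create()        # assumes opencv-contrib with the xfeatures2d module
timg = cv2.imread('template.png')           # hypothetical template; checkAvailability reads timg.shape
tkp, tdes = sift.detectAndCompute(cv2.cvtColor(timg, cv2.COLOR_BGR2GRAY), None)
found = checkAvailability(sift, tkp, tdes, 'scene.png')   # 'scene.png' is a hypothetical query image path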
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def addRectangulars(frame_from, corners_arr):
    add_frame = np.zeros(frame_from.shape, np.uint8) 
    cv2.polylines(add_frame,[corners_arr],True,(0,255,255))
    frame = cv2.add(add_frame, frame_from)
    return frame
Project: CameraTablet    Author: dmvlasenk    | project source | file source
def addRectangulars(frame_from, corners_arr):
    add_frame = np.zeros(frame_from.shape, np.uint8) 
    cv2.polylines(add_frame,[corners_arr],True,(0,255,255))
    frame = cv2.add(add_frame, frame_from)
    return frame
Project: OpenCV-Traffic-Counter    Author: alex-drake    | project source | file source
def draw(self, output_image):
        for point in self.positions:
            cv2.circle(output_image, point, 2, (0, 0, 255), -1)
            cv2.polylines(output_image, [np.int32(self.positions)]
                , False, (0, 0, 255), 1)

# ============================================================================
Project: pynephoscope    Author: neXyon    | project source | file source
def renderCalibrationResult(self):
        num_circles = 9
        num_points = 64
        num = num_circles * num_points
        altaz = np.ones((num, 2))

        for c in range(num_circles):
            alt = c / 18 * np.pi
            for p in range(num_points):
                az = p / num_points * 2 * np.pi
                index = c * num_points + p
                altaz[index, 0] = alt
                altaz[index, 1] = az

        pos = self.calibrator.transform(altaz)
        inpos = self.renderer.altazToPos(altaz)

        image = self.calibrator.image.copy()
        sky = cv2.cvtColor(self.renderer.image.copy(), cv2.COLOR_GRAY2BGR)

        for c in range(num_circles):
            pts = np.array(pos[c * num_points:(c + 1) * num_points, :], np.int32)
            pts = pts.reshape((-1,1,2))
            cv2.polylines(image, [pts], True, (255, 0, 0))

            pts = np.array(inpos[c * num_points:(c + 1) * num_points, 0:2], np.int32)
            pts = pts.reshape((-1,1,2))
            cv2.polylines(sky, [pts], True, (255, 0, 0))

        correspondences = self.calibrator.getCurrentCorrespondences()

        for correspondence in correspondences:
            if correspondence.pos is not None:
                cv2.circle(image, correspondence.pos, self.circle_radius, self.selected_color)
            if correspondence.altaz is not None:
                altaz = correspondence.altaz

                pos = np.array(self.calibrator.transform(altaz), np.int32)[0] # np.array([np.array([a.radian for a in altaz])]) TODO
                pos = (pos[0], pos[1])

                cv2.circle(image, pos, self.circle_radius, self.marked_color)
                self.renderer.highlightStar(sky, correspondence.altaz, self.circle_radius, self.marked_color)

        cv2.imshow(self.image_window, image)
        cv2.imshow(self.sky_window, sky)