Python cv2 module: CV_AA example source code

From open-source Python projects we have extracted the following 17 code examples that illustrate how to use cv2.CV_AA.

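The constant is passed as the lineType argument of OpenCV's drawing functions to request anti-aliased rendering. A minimal sketch of the typical call pattern, assuming an OpenCV 2.x build where cv2.CV_AA is defined:

import cv2
import numpy as np

# Draw an anti-aliased line and anti-aliased text on a blank canvas.
canvas = np.zeros((200, 400, 3), dtype=np.uint8)
cv2.line(canvas, (10, 150), (390, 50), (0, 255, 0), 2, cv2.CV_AA)
cv2.putText(canvas, 'antialiased', (10, 190), cv2.FONT_HERSHEY_SIMPLEX,
            1.0, (255, 255, 255), 2, cv2.CV_AA)
cv2.imshow('CV_AA demo', canvas)
cv2.waitKey(0)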
Project: emojivis    Author: JustinShenk    | project source | file source
def draw_delaunay(im, subdiv, delaunay_color):

    triangleList = subdiv.getTriangleList()
    size = im.shape
    r = (0, 0, size[1], size[0])

    for t in triangleList:

        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])

        if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3):

            cv2.line(im, pt1, pt2, delaunay_color, 1, cv2.CV_AA, 0)
            cv2.line(im, pt2, pt3, delaunay_color, 1, cv2.CV_AA, 0)
            cv2.line(im, pt3, pt1, delaunay_color, 1, cv2.CV_AA, 0)

# Check if a point is inside a rectangle
Project: autonomous_driving    Author: StatueFungus    | project source | file source
def draw_text(self, image, text, size, color, position):
        '''
            Draws a text onto the image.

            Parameters
            ----------
            text : String
                Text to display
            size : Integer
                Size of the text
            color : Tuple
                Colour of the text >> (255,0,0)
            position : Tuple
                Position of the text >> (x,y)

        '''
        if imutils.is_cv2():
            cv2.putText(image, text, position, cv2.FONT_HERSHEY_COMPLEX, size, color, 2, cv2.CV_AA)
        elif imutils.is_cv3():
            cv2.putText(image, text, position, cv2.FONT_HERSHEY_COMPLEX, size, color, 2, cv2.LINE_AA)
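cv2.CV_AA only exists in OpenCV 2.x builds; OpenCV 3+ exposes the same flag as cv2.LINE_AA, which is exactly what the imutils version check above handles. A small version-independent sketch (not part of this project):

import cv2

# Pick whichever anti-aliasing constant the installed OpenCV provides.
LINE_AA = cv2.LINE_AA if hasattr(cv2, 'LINE_AA') else cv2.CV_AA
# e.g. cv2.putText(image, text, position, cv2.FONT_HERSHEY_COMPLEX, size, color, 2, LINE_AA)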
Project: yolo_tensorflow    Author: hizhangp    | project source | file source
def draw_result(self, img, result):
        for i in range(len(result)):
            x = int(result[i][1])
            y = int(result[i][2])
            w = int(result[i][3] / 2)
            h = int(result[i][4] / 2)
            cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img, (x - w, y - h - 20),
                          (x + w, y - h), (125, 125, 125), -1)
            cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.CV_AA)
Project: Resnet-Emotion-Recognition    Author: safreita1    | project source | file source
def process_image(self, roi_gray, img):
        image_scaled = np.array(cv2.resize(roi_gray, (48, 48)), dtype=float)
        image_processed = image_scaled.flatten()
        image_processed = image_processed.reshape([-1, 48, 48, 1])

        prediction = self.model.predict(image_processed)
        emotion = self.smooth_emotions(prediction)

        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, "Emotion: " + emotion, (50, 450), font, 1, (255, 255, 255), 2, cv2.CV_AA)
        cv2.imshow('img', img)
Project: pybot    Author: spillai    | project source | file source
def annotate_bboxes(vis, bboxes, target_names): # , box_color=lambda target: (0, 200, 0) if UWRGBDDataset.get_category_name(target) != 'background' else (100, 100, 100)): 
        for bbox,target_name in izip(bboxes, target_names): 
            box_color = (0, 200, 0) # if UWRGBDDataset.get_category_name(target) != 'background' else (100, 100, 100)
            annotate_bbox(vis, bbox.coords, color=box_color, title=target_name.title().replace('_', ' '))

            # cv2.rectangle(vis, (bbox.coords[0], bbox.coords[1]), (bbox.coords[2], bbox.coords[3]), box_color, 2) 
            # cv2.rectangle(vis, (bbox.coords[0]-1, bbox.coords[1]-15), (bbox.coords[2]+1, bbox.coords[1]), box_color, -1)
            # cv2.putText(vis, '%s' % (), 
            #             (bbox[0], bbox[1]-5), 
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), thickness=1, lineType=cv2.CV_AA)
        return vis
Project: pybot    Author: spillai    | project source | file source
def plot_epipolar_line(im_1, F_10, x_0, im_0=None): 
    """
    Plot the epipole and the epipolar lines l = F * x.

    l[0] * x + l[1] * y + l[2] = 0
    @ x=0: y = -l[2] / l[1]
    @ x=W: y = (-l[2] -l[0]*W) / l[1]
    """

    H,W = im_1.shape[:2]
    lines_1 = epipolar_line(F_10, x_0)

    vis_1 = to_color(im_1)
    vis_0 = to_color(im_0) if im_0 is not None else None

    N = 20
    cols = get_color_by_label(np.arange(len(x_0)) % N) * 255
    # for tid, pt in zip(ids, pts): 
    #     cv2.circle(vis, tuple(map(int, pt)), 2, 
    #                tuple(map(int, cols[tid % N])) if colored else (0,240,0),
    #                -1, lineType=cv2.CV_AA)

    # col = (0,255,0)
    for col, l1 in zip(cols, lines_1):
        try: 
            x0, y0 = map(int, [0, -l1[2] / l1[1] ])
            x1, y1 = map(int, [W, -(l1[2] + l1[0] * W) / l1[1] ])
            cv2.line(vis_1, (x0,y0), (x1,y1), col, 1)
        except: 
            pass
            # raise RuntimeWarning('Failed to estimate epipolar line {:s}'.format(l1))

    if vis_0 is not None: 
        for col, x in zip(cols, x_0): 
            cv2.circle(vis_0, tuple(x), 3, col, -1)
        return np.vstack([vis_0, vis_1])

    return vis_1
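The epipolar_line helper is not shown in this listing; from the docstring it presumably maps each point x in image 0 to its epipolar line l = F_10 * [x, y, 1]^T in image 1. A minimal sketch under that assumption:

import numpy as np

def epipolar_line(F_10, x_0):
    # Assumed behaviour: x_0 is an (N, 2) array of pixel coordinates; append a
    # column of ones and return the (N, 3) epipolar lines, one row l = F * x per point.
    x_hom = np.hstack([x_0, np.ones((len(x_0), 1))])
    return x_hom.dot(F_10.T)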
Project: pybot    Author: spillai    | project source | file source
def draw_features(im, pts, colors=None, size=2): 
    out = to_color(im)
    if colors is not None: 
        cols = colors.astype(np.int64)
    else: 
        cols = np.tile([0, 255, 0], (len(pts), 1)).astype(np.int64)

    for col, pt in zip(cols, pts): 
        tl = np.int32(pt - size)
        br = np.int32(pt + size)
        cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
        # cv2.circle(out, tuple(map(int, pt)), size, tuple(col), -1, lineType=cv2.CV_AA)
    return out
Project: pybot    Author: spillai    | project source | file source
def annotate_bbox(vis, coords, color=(0,200,0), title=''): 
    # Bounding Box and top header
    icoords = coords.astype(np.int32)
    cv2.rectangle(vis, (icoords[0], icoords[1]), (icoords[2], icoords[3]), color, 2)
    # cv2.rectangle(vis, (icoords[0]-1, icoords[1]-15), (icoords[2]+1, icoords[1]), color, -1)
    cv2.putText(vis, '{}'.format(title), (icoords[0], icoords[1]-5), 
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), thickness=1, lineType=cv2.CV_AA)
    return vis
Project: pybot    Author: spillai    | project source | file source
def create_mask(self, shape, pts): 
        """
        Create a mask image to prevent feature extraction around regions
        that already have features detected. i.e prevent feature crowding
        """

        mask = np.ones(shape=shape, dtype=np.uint8) * 255
        all_pts = np.vstack([self.aug_pts_, pts]) if hasattr(self, 'aug_pts_') \
                  else pts
        try: 
            for pt in all_pts: 
                cv2.circle(mask, tuple(map(int, pt)), self.mask_size_, 0, -1, lineType=cv2.CV_AA)
        except:
            pass
        return mask
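The call site of create_mask is not included here; a mask built this way is typically handed to a feature detector so that new keypoints are only searched where the mask is still 255. A hedged usage sketch with hypothetical names (tracker, gray, existing_pts):

# `tracker` is assumed to be an instance of the class defining create_mask above.
mask = tracker.create_mask(gray.shape[:2], existing_pts)
new_pts = cv2.goodFeaturesToTrack(gray, maxCorners=500, qualityLevel=0.01,
                                  minDistance=5, mask=mask)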
Project: YOLO-Object-Detection-Tensorflow    Author: huseinzol05    | project source | file source
def draw_result(img, result):
    for i in range(len(result)):
        x = int(result[i][1])
        y = int(result[i][2])
        w = int(result[i][3] / 2)
        h = int(result[i][4] / 2)
        cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
        cv2.rectangle(img, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
        cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.CV_AA)
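Judging from the indexing in draw_result, each entry of result appears to be [class_name, x_center, y_center, width, height, score]. A hypothetical call with made-up values:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)          # placeholder frame
result = [['dog', 320.0, 240.0, 120.0, 80.0, 0.87]]    # hypothetical detection
draw_result(img, result)
cv2.imshow('detections', img)
cv2.waitKey(0)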
Project: python-opencv2    Author: bunkahle    | project source | file source
def draw_quads(self, img, quads, color = (0, 255, 0)):
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef)[0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            # shift=2 means the point coordinates carry 2 fractional bits, so the
            # quad is pre-multiplied by 4 to get quarter-pixel accuracy.
            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2)
Project: yolov2_tensorflow    Author: biyaa    | project source | file source
def draw_result(img, result):
    for i in range(len(result)):
        left = int(result[i][1])
        right = int(result[i][2])
        top = int(result[i][3])
        bot = int(result[i][4])
        c = i%3
        color = 200*(c==0), 200*(c==1), 200*(c==2)
        cv2.rectangle(img, (left, top), (right, bot), (color), 5)
        cv2.rectangle(img, (left, top + 20),
                      (right, top+1), (color), -1)
        cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (left+ 15, top -7 + 20), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1, cv2.CV_AA)
Project: CartoonPy    Author: bxtkezhan    | project source | file source
def headblur(clip,fx,fy,r_zone,r_blur=None):
    """
    Returns a filter that will blur a moving part (a head?) of
    the frames. The position of the blur at time t is
    defined by (fx(t), fy(t)), the radius of the blurring
    by ``r_zone`` and the intensity of the blurring by ``r_blur``.
    Requires OpenCV for the circling and the blurring.
    Automatically deals with the case where part of the image goes
    offscreen.
    """

    if r_blur is None: r_blur = 2*r_zone/3

    def fl(gf,t):

        im = gf(t)
        h,w,d = im.shape
        x,y = int(fx(t)),int(fy(t))
        x1,x2 = max(0,x-r_zone),min(x+r_zone,w)
        y1,y2 = max(0,y-r_zone),min(y+r_zone,h)
        region_size = y2-y1,x2-x1

        mask = np.zeros(region_size).astype('uint8')
        cv2.circle(mask, (r_zone,r_zone), r_zone, 255, -1,
                   lineType=cv2.CV_AA)

        mask = np.dstack(3*[(1.0/255)*mask])

        orig = im[y1:y2, x1:x2]
        blurred = cv2.blur(orig,(r_blur, r_blur))
        im[y1:y2, x1:x2] = mask*blurred + (1-mask)*orig
        return im

    return clip.fl(fl)



#------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
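headblur returns a new clip via clip.fl, so it is applied directly to a clip object. A hedged usage sketch written against the moviepy API (which CartoonPy mirrors), with a made-up file name and trajectory:

from moviepy.editor import VideoFileClip

clip = VideoFileClip('face.mp4')                      # hypothetical input clip
# fx(t), fy(t): centre of the blurred disc at time t (hypothetical trajectory)
blurred = headblur(clip, fx=lambda t: 320 + 10 * t, fy=lambda t: 240, r_zone=60)
blurred.write_videofile('face_blurred.mp4')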
Project: python-opencv2    Author: bunkahle    | project source | file source
def Draw(self, display):
        if (self.pos[0] > 10000) or (self.pos[1] > 10000) or (self.pos[0] < -10000) or (self.pos[1] < -10000):
            return
        if self.drawtype == DRAWTYPE_POINT:  # Point
            # pygame.draw.circle(display, self.colour, self.pos, 0)
            # cv.Circle(display, (int(self.pos[0]), int(self.pos[1])), 0, self.colour)
            cv2.circle(display ,(int(self.pos[0]), int(self.pos[1])), 0, self.colour)

        elif self.drawtype == DRAWTYPE_CIRCLE:  # Circle
            # pygame.draw.circle(display, self.colour, self.pos, self.radius)
            # cv.Circle(display, (int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour, -1)
            cv2.circle(display ,(int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour, -1)

        elif self.drawtype == DRAWTYPE_LINE:
            if self.length == 0.0:
                # pygame.draw.circle(display, self.colour, self.pos, 0)
                # cv.Circle(display, (int(self.pos[0]), int(self.pos[1])), 0, self.colour)
                cv2.circle(display ,(int(self.pos[0]), int(self.pos[1])), 0, self.colour)
            else:
                velocitymagoverlength = math.sqrt(self.velocity[0]**2 + self.velocity[1]**2) / self.length
                linevec = [(self.velocity[0] / velocitymagoverlength), (self.velocity[1] / velocitymagoverlength)]
                endpoint = [self.pos[0] + linevec[0], self.pos[1] + linevec[1]]
                # pygame.draw.aaline(display, self.colour, self.pos, endpoint)
                # cv.Line(display, (int(self.pos[0]), int(self.pos[1])), (int(endpoint[0]), int(endpoint[1])), self.colour, lineType=cv.CV_AA)
                cv2.line(display, (int(self.pos[0]), int(self.pos[1])), (int(endpoint[0]), int(endpoint[1])), self.colour, lineType=cv2.CV_AA)

        elif self.drawtype == DRAWTYPE_SCALELINE:  # Scaling line (scales with velocity)
            endpoint = [self.pos[0] + self.velocity[0], self.pos[1] + self.velocity[1]]
            # pygame.draw.aaline(display, self.colour, self.pos, endpoint)
            # cv.Line(display, (int(self.pos[0]), int(self.pos[1])), (int(endpoint[0]), int(endpoint[1])), self.colour, lineType=cv.CV_AA)
            cv2.line(display, (int(self.pos[0]), int(self.pos[1])), (int(endpoint[0]), int(endpoint[1])), self.colour, lineType=cv2.CV_AA)

        elif self.drawtype == DRAWTYPE_BUBBLE:  # Bubble
            if self.radius >= 1.0:
                # pygame.draw.circle(display, self.colour, self.pos, self.radius, 1)
                # cv.Circle(display, (int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour, 1)
                cv2.circle(display ,(int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour, int(self.radius))
            else:  # Pygame won't draw circles with thickness < radius, so if radius is smaller than one don't bother trying to set thickness
                # pygame.draw.circle(display, self.colour, self.pos, self.radius)
                # cv.Circle(display, (int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour)
                cv2.circle(display ,(int(self.pos[0]), int(self.pos[1])), int(self.radius), self.colour)

        elif self.drawtype == DRAWTYPE_IMAGE:  # Image
            # size = self.image.get_size()
            # display.blit(self.image, (self.pos[0] - size[1], self.pos[1] - size[1]))
            size = self.image.shape
            x, y = self.pos[0] - size[1], self.pos[1] - size[1]
            # print x, y
            self.Paste(display, self.image, (x, y))
Project: python-opencv2    Author: bunkahle    | project source | file source
def Draw(self, display):
        if not self.hascontacts:
            # W = display.get_width()
            # H = display.get_height()
            (W, H) = display.shape[1], display.shape[0]
            edgecontacts = []  # Where the line touches the screen edges

            if self.normal[0] == 0.0:
                edgecontacts = [[0, self.pos[1]], [W, self.pos[1]]]

            elif self.normal[1] == 0.0:
                edgecontacts = [[self.pos[0], 0], [self.pos[0], H]]

            else:
                pdotn = (self.pos[0] * self.normal[0]) + (self.pos[1] * self.normal[1])
                reciprocaln0 = (1.0 / self.normal[0])
                reciprocaln1 = (1.0 / self.normal[1])

                # Left-hand side of the screen
                pointl = [0, 0]
                pointl[1] = pdotn * reciprocaln1
                if (pointl[1] >= 0) and (pointl[1] <= H):
                    edgecontacts.append(pointl)

                # Top of the screen
                pointt = [0, 0]
                pointt[0] = pdotn * reciprocaln0
                if (pointt[0] >= 0) and (pointt[0] <= W):
                    edgecontacts.append(pointt)

                # Right-hand side of the screen
                pointr = [W, 0]
                pointr[1] = (pdotn - (W * self.normal[0])) * reciprocaln1
                if (pointr[1] >= 0) and (pointr[1] <= H):
                    edgecontacts.append(pointr)

                # Bottom of the screen
                pointb = [0, H]
                pointb[0] = (pdotn - (H * self.normal[1])) * reciprocaln0
                if (pointb[0] >= 0) and (pointb[0] <= W):
                    edgecontacts.append(pointb)

            self.edgecontacts = edgecontacts
            self.hascontacts = True

        # pygame.draw.aalines(display, self.colour, True, self.edgecontacts)
        # cv.Line(display, (int(self.edgecontacts[0][0]), int(self.edgecontacts[0][1])), (int(self.edgecontacts[1][0]), int(self.edgecontacts[1][1])), self.colour, lineType=cv.CV_AA)
        cv2.line(display, (int(self.edgecontacts[0][0]), int(self.edgecontacts[0][1])), (int(self.edgecontacts[1][0]), int(self.edgecontacts[1][1])), self.colour, lineType=cv2.CV_AA)
Project: deep-prior-pp    Author: moberweger    | project source | file source
def plotJoints(self, ax, joint, color='nice', jcolor=None, annoscale=1):
        """
        Plot connected joints
        :param ax: axis to plot on
        :param joint: joints to connect
        :param color: line color
        """

        if joint.shape[0] >= numpy.max(self.jointConnections):
            for i in range(len(self.jointConnections)):
                if isinstance(ax, numpy.ndarray):
                    if color == 'nice':
                        lc = tuple((self.jointConnectionColors[i]*255.).astype(int))
                    elif color == 'gray':
                        lc = tuple((rgb_to_gray(self.jointConnectionColors[i])*255.).astype(int))
                    else:
                        lc = color
                    cv2.line(ax, (int(numpy.rint(joint[self.jointConnections[i][0], 0])),
                                  int(numpy.rint(joint[self.jointConnections[i][0], 1]))),
                             (int(numpy.rint(joint[self.jointConnections[i][1], 0])),
                              int(numpy.rint(joint[self.jointConnections[i][1], 1]))),
                             lc, thickness=3*annoscale, lineType=cv2.CV_AA)
                else:
                    if color == 'nice':
                        lc = self.jointConnectionColors[i]
                    elif color == 'gray':
                        lc = rgb_to_gray(self.jointConnectionColors[i])
                    else:
                        lc = color
                    ax.plot(numpy.hstack((joint[self.jointConnections[i][0], 0], joint[self.jointConnections[i][1], 0])),
                            numpy.hstack((joint[self.jointConnections[i][0], 1], joint[self.jointConnections[i][1], 1])),
                            c=lc, linewidth=3.0*annoscale)
        for i in range(joint.shape[0]):
            if isinstance(ax, numpy.ndarray):
                if jcolor == 'nice':
                    jc = tuple((self.jointColors[i]*255.).astype(int))
                elif jcolor == 'gray':
                    jc = tuple((rgb_to_gray(self.jointColors[i])*255.).astype(int))
                else:
                    jc = jcolor
                cv2.circle(ax, (int(numpy.rint(joint[i, 0])), int(numpy.rint(joint[i, 1]))), 6*annoscale,
                           jc, thickness=-1, lineType=cv2.CV_AA)
            else:
                if jcolor == 'nice':
                    jc = self.jointColors[i]
                elif jcolor == 'gray':
                    jc = rgb_to_gray(self.jointColors[i])
                else:
                    jc = jcolor

                ax.scatter(joint[i, 0], joint[i, 1], marker='o', s=100,
                           c=jc)
Project: BoarGL    Author: ivorjawa    | project source | file source
def Testors():
    import cv, cv2

    class Testor(object):
        def __init__(self):
            #self.cap = cv2.VideoCapture(0)
            #self.cap.set(3,640)
            #self.cap.set(4,480)
            self.timestamp = time.time()
            self.font = cv2.FONT_HERSHEY_SIMPLEX
            self.t = Trawler()
        def loop(self):
            while(1):
                p = self.t.fish()
                if(p):
                    self.showbars(p)
                if cv2.waitKey( 10) == 27:
                    break
                #time.sleep(1/30)
        def showbars(self, pixels):
            self.now = time.time()
            self.timedelt = self.now - self.timestamp
            self.timestamp = self.now
            im = np.zeros((480,640,3), np.uint8)

            im2 = np.copy(im)

            mids = np.array(range(32))*20 + 10
            for i in range(len(pixels)):
                xoff = i*20
                pixcol = np.array(pixels[i])
                barheit = int((np.sum(pixcol)/3.0) * 400)
                #print "i, pixcol, barheit:  %d: %s, %d" % (i, str(pixcol), barheit)
                # opencv is bgr  rgb
                #           210  012
                #pixcol.reverse()
                pc = pixcol*255
                cv2.rectangle(im2, (0+xoff, 0), (20+xoff, 479), (255,255,255), 1)
                cv2.rectangle(im2, (0+xoff, 40), (20+xoff, 40+barheit), pc, cv.CV_FILLED)
                #if( i < 30):
                #    p1 = (mids[i], 480-int(amp_values[i]*460+10))
                #    p2 = (mids[i+1], 480-int(amp_values[i+1]*460+10))
                #    cv2.line(im2, p1, p2, (0,255,0))

            #print "[pixel list of %d][%s]" % (len(pixels), str(pixels[0]))

            delt = "FR: %0.1f, px: %d, %s, %s" % (1/self.timedelt, len(pixels), str(im2.shape), str(self.timestamp))
            #delt = "^: %0.1f, px: %d" % (timedelt, len(pixels))
            cv2.putText( im2, delt, (30, 20), self.font, .4,
                         (0, 0, 255), 1, cv2.CV_AA)
            cv2.imshow('show bars', im2)
    t = Testor()
    t.loop()