Python cv2 module: putText() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.putText().
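
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the full cv2.putText() argument list; the canvas, text, and coordinates are illustrative placeholders.

import cv2
import numpy as np

canvas = np.zeros((100, 400, 3), dtype=np.uint8)  # blank BGR image to draw on
cv2.putText(canvas,
            "Hello, OpenCV",            # text string to render
            (10, 60),                   # bottom-left corner of the text (x, y)
            cv2.FONT_HERSHEY_SIMPLEX,   # font face
            1.0,                        # font scale
            (255, 255, 255),            # color in BGR order
            2,                          # thickness
            cv2.LINE_AA)                # anti-aliased line type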

Project: Gender    Author: rabeter
def draw_rects(img, rects):
    """
    Draw a rectangle around each detected face and label it with the predicted gender.
    :param img: input image (modified in place)
    :param rects: list of face rectangles as (x, y, w, h)
    :return: None
    """
    for x, y, w, h in rects:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 0), 2)
        face = img[y:y+h, x:x+w]  # crop the detected face region
        face = cv2.resize(face, (224, 224))
        if Gender.predict(face)==1:
            text = "Male"
        else:
            text = "Female"
        cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
Project: facial_emotion_recognition    Author: adamaulia
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)

    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0)
Project: pedestrianSys    Author: PhilipChicco
def display_detected(self, frame, face_locs, people, confidence):
        """
        - Display ROI's of detected faces with labels
        :param frame:
        :param face_locs:
        :param people : people in image classified
        :param confidence : recognition confidence
        :return:
        """

        if not len(face_locs) == 0:  # at least one face was detected
            for (top, right, bottom, left), name, conf in zip(face_locs, people, confidence):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4

                # string
                conf_4f = "%.3f" % conf
                peop_conf = "{} {}%".format(name, float(conf_4f) * 100)

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Draw a label with a name below the face
                # cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.rectangle(frame, (left, top + 20), (right, top), (0, 0, 255), cv2.FILLED)

                font = cv2.FONT_HERSHEY_DUPLEX  # color
                # cv2.putText(frame, peop_conf , (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
                cv2.putText(frame, peop_conf, (left, top + 15), font, 0.5, (255, 255, 255), 1)
        pass
Project: pe    Author: anguelos
def plotRectangles(rects,transcriptions,bgrImg,rgbCol):
    bgrCol=np.array(rgbCol)[[2,1,0]]
    res=bgrImg.copy()
    pts=np.empty([rects.shape[0],5,1,2])
    if rects.shape[1]==4:
        x=rects[:,[0,2,2,0,0]]
        y=rects[:,[1,1,3,3,1]]
    elif rects.shape[1]==8:
        x=rects[:,[0,2,4,6,0]]
        y=rects[:,[1,3,5,7,1]]
    else:
        raise Exception()
    pts[:,:,0,0]=x
    pts[:,:,0,1]=y
    pts=pts.astype('int32')
    ptList=[pts[k,:,:,:] for k in range(pts.shape[0])]
    if not (transcriptions is None):
        for rectNum in range(rects.shape[0]):
            res=cv2.putText(res,transcriptions[rectNum],(rects[rectNum,0],rects[rectNum,1]),cv2.FONT_HERSHEY_PLAIN,1,bgrCol)
    res=cv2.polylines(res,ptList,False,bgrCol)
    return res
Project: trackingtermites    Author: dmrib
def draw_termites(self):
        """Draw termites on simulation.

        Args:
            None.
        Returns:
            None.
        """
        for termite in self.termites:
            cv2.circle(self.background, termite.trail[self.current_step],
                       self.params['termite_radius'], termite.color, 2)
            cv2.circle(self.background, termite.trail[self.current_step],
                       2, termite.color, -1)
            cv2.putText(self.background, termite.number, (termite.trail[self.current_step][0] - 4,
                        termite.trail[self.current_step][1] - self.params['termite_radius'] - 5), 2,
                        color=termite.color, fontScale=0.4)
            cv2.circle(self.video_source.current_frame, termite.trail[self.current_step],
                       self.params['termite_radius'], termite.color, 2)
            cv2.circle(self.video_source.current_frame, termite.trail[self.current_step],
                       2, termite.color, -1)
            cv2.putText(self.video_source.current_frame, termite.number, (termite.trail[self.current_step][0] - 4,
                        termite.trail[self.current_step][1] - self.params['termite_radius'] - 5), 2,
                        color=termite.color, fontScale=0.4)
Project: squeezeDet-hand    Author: fyhtea
def _draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, form='center'):
  assert form == 'center' or form == 'diagonal', \
      'bounding box format not accepted: {}.'.format(form)

  for bbox, label in zip(box_list, label_list):

    if form == 'center':
      bbox = bbox_transform(bbox)

    xmin, ymin, xmax, ymax = [int(b) for b in bbox]

    l = label.split(':')[0] # text before "CLASS: (PROB)"
    if cdict and l in cdict:
      c = cdict[l]
    else:
      c = color

    # draw box
    cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
    # draw label
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
Project: human-rl    Author: gsastry
def add_text(img, text, text_top, image_scale):
    """
    Args:
        img (numpy array of shape (width, height, 3): input image
        text (str): text to add to image
        text_top (int): location of top text to add
        image_scale (float): image resize scale

    Summary:
        Add display text to a frame.

    Returns:
        Next available location of top text (allows for chaining this function)
    """
    cv2.putText(
        img=img,
        text=text,
        org=(0, text_top),
        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=0.15 * image_scale,
        color=(255, 255, 255))
    return text_top + int(5 * image_scale)
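
A hypothetical usage of the add_text() helper above, chaining the returned offset to stack several lines of text; the frame, strings, and image_scale value are placeholders.

y = 10
y = add_text(frame, "episode: 3", y, image_scale=4.0)
y = add_text(frame, "reward: 1.0", y, image_scale=4.0)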
Project: mx-rfcn    Author: giorking
def save_all_detection(im_array, detections, imdb_classes=None, thresh=0.7):
    """
    save all detections in one image with result.png
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param imdb_classes: list of names in imdb
    :param thresh: threshold for valid detections
    :return:
    """
    import random
    im = image_processing.transform_inverse(im_array, config.PIXEL_MEANS)
    im = im[:, :, ::-1].copy()  # back to b,g,r
    for j in range(1, len(imdb_classes)):
        color = (255*random.random(), 255*random.random(), 255*random.random())  # generate a random color
        dets = detections[j]
        for i in range(dets.shape[0]):
            bbox = dets[i, :4]
            score = dets[i, -1]
            if score > thresh:
                cv2.rectangle(im, (int(round(bbox[0])), int(round(bbox[1]))), 
                                (int(round(bbox[2])), int(round(bbox[3]))), color, 2)
                cv2.putText(im, '%s'%imdb_classes[j], (bbox[0], bbox[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
    cv2.imwrite("result.jpg", im)
Project: esys-pbi    Author: fsxfreak
def draw_markers(img,markers):
    for m in markers:
        centroid = np.array(m['centroid'],dtype=np.float32)
        origin = np.array(m['verts'][0],dtype=np.float32)
        hat = np.array([[[0,0],[0,1],[.5,1.25],[1,1],[1,0]]],dtype=np.float32)
        hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
        if m['id_confidence']>.9:
            cv2.polylines(img,np.int0(hat),color = (0,0,255),isClosed=True)
        else:
            cv2.polylines(img,np.int0(hat),color = (0,255,0),isClosed=True)
        # cv2.polylines(img,np.int0(centroid),color = (255,255,int(255*m['id_confidence'])),isClosed=True,thickness=2)
        m_str = 'id: {:d}'.format(m['id'])
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]),color=(0,0,0),thickness=-1)
        cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'opf_vel' in m:
            m_str = 'otf: {}'.format(m['opf_vel'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
Project: MLPractices    Author: carefree0910
def put_text(img, i, layers, y):
    """
    Put text on canvas
    :param img    : Canvas 
    :param i      : i-th hidden layer, notice that layers[i].name is the name of i-th hidden layer
    :param layers : Layers
    :param y      : (?, y) is the center of the neuron graph of i-th hidden layer 
    """
    ############################################################
    #                  Write your code here!                   #
    ############################################################

    cv2.putText(img, layers[i].name, (12, y - 36), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1, cv2.LINE_AA)

    ############################################################
    #                           End                            #
    ############################################################
Project: esys-pbi    Author: fsxfreak
def update(self,frame,events):
        self.last_frame_ts = frame.timestamp
        from player_methods import transparent_circle
        events['fixations'] = self.g_pool.fixations_by_frame[frame.index]
        if self.show_fixations:
            for f in self.g_pool.fixations_by_frame[frame.index]:
                x = int(f['norm_pos'][0]*self.img_size[0])
                y = int((1-f['norm_pos'][1])*self.img_size[1])
                transparent_circle(frame.img, (x,y), radius=f['pix_dispersion']/2, color=(.5, .2, .6, .7), thickness=-1)
                cv2.putText(
                    frame.img,
                    '{:d}'.format(f['id']),
                    (x+20,y),
                    cv2.FONT_HERSHEY_DUPLEX,
                    0.8,(255,150,100))
                # cv2.putText(frame.img,'%i - %i'%(f['start_frame_index'],f['end_frame_index']),(x,y), cv2.FONT_HERSHEY_DUPLEX,0.8,(255,150,100))
Project: esys-pbi    Author: fsxfreak
def update(self,frame,events):
        self.last_frame_ts = frame.timestamp
        from player_methods import transparent_circle
        events['fixations'] = self.g_pool.fixations_by_frame[frame.index]
        if self.show_fixations:
            for f in self.g_pool.fixations_by_frame[frame.index]:
                eye_id = f['eye_id']
                x = int(f['norm_pos'][0]*self.img_size[0])
                y = int((1-f['norm_pos'][1])*self.img_size[1])
                transparent_circle(frame.img, (x,y), radius=f['pix_dispersion']/2, color=(.5, .2, .6, .7), thickness=-1)
                cv2.putText(
                    frame.img,
                    '{:d} - eye {:d}'.format(f['id'], eye_id),
                    (x+20,y-5+30*eye_id),
                    cv2.FONT_HERSHEY_DUPLEX,
                    0.8,(255,150,100))
                # cv2.putText(frame.img,'%i - %i'%(f['start_frame_index'],f['end_frame_index']),(x,y), cv2.FONT_HERSHEY_DUPLEX,0.8,(255,150,100))
Project: face    Author: MOluwole
def DispID(x, y, w, h, NAME, Image):

    #  --------------------------------- THE POSITION OF THE ID BOX  ---------------------------------------------

    Name_y_pos = y - 10
    Name_X_pos = x + w/2 - (len(NAME)*7/2)

    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos +10 + (len(NAME) * 7) > Image.shape[1]):
          Name_X_pos= Name_X_pos - (Name_X_pos +10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10

 #  ------------------------------------    THE DRAWING OF THE BOX AND ID   --------------------------------------

    draw_box(Image, x, y, w, h)


    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), (0,0,0), -2)           # Draw a Black Rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), WHITE, 1) 
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)                         # Print the name of the ID
Project: face    Author: MOluwole
def DispID2(x, y, w, h, NAME, Image):

#  --------------------------------- THE POSITION OF THE ID BOX  -------------------------------------------------        

    Name_y_pos = y - 40
    Name_X_pos = x + w/2 - (len(NAME)*7/2)

    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos +10 + (len(NAME) * 7) > Image.shape[1]):
          Name_X_pos= Name_X_pos - (Name_X_pos +10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10

 #  ------------------------------------    THE DRAWING OF THE BOX AND ID   --------------------------------------
    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), (0,0,0), -2)           # Draw a Black Rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), WHITE, 1) 
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)                         # Print the name of the ID


# ---------------     THIRD ID BOX      ----------------------
Project: face    Author: MOluwole
def DispID3(x, y, w, h, NAME, Image):

#  --------------------------------- THE POSITION OF THE ID BOX  -------------------------------------------------        

    Name_y_pos = y - 70
    Name_X_pos = x + w/2 - (len(NAME)*7/2)

    if Name_X_pos < 0:
        Name_X_pos = 0
    elif (Name_X_pos +10 + (len(NAME) * 7) > Image.shape[1]):
          Name_X_pos= Name_X_pos - (Name_X_pos +10 + (len(NAME) * 7) - (Image.shape[1]))
    if Name_y_pos < 0:
        Name_y_pos = y + h + 10

 #  ------------------------------------    THE DRAWING OF THE BOX AND ID   --------------------------------------
    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), (0,0,0), -2)           # Draw a Black Rectangle over the face frame
    cv2.rectangle(Image, (Name_X_pos-10, Name_y_pos-25), (Name_X_pos +10 + (len(NAME) * 7), Name_y_pos-1), WHITE, 1) 
    cv2.putText(Image, NAME, (Name_X_pos, Name_y_pos - 10), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)                         # Print the name of the ID
Project: py-faster-rcnn-tk1    Author: joeking11829
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        #Create Rectangle and Text using OpenCV
        #print ('ClassName:', class_name, 'bbox:', bbox, 'score:' ,score)

        #Draw the Rectangle
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        #Draw the Text
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)

        #Show Image
        #cv2.imshow("Detect Result", im)
Project: piwall-cvtools    Author: infinnovation
def render(self, dst):
        t = self.t
        self.t += 1.0/30.0

        l = 120
        black = [245,245,245]
        white = [10,10,10]
        colors = [black,white]
        nsq = 0
        x = 0
        for xs in range(0,16):
            y = 0
            for ys in range(0,9):
                fg = colors[nsq%2]
                bg = colors[(nsq+1) % 2]
                dst[y:y+l,x:x+l] = bg
                cv2.putText(dst, "%s" % nsq, (x+l/4, y+2*l/3), cv2.FONT_HERSHEY_PLAIN, 3, [0,0,255], thickness = 2, lineType=cv2.LINE_AA)
                y+=l
                nsq+=1
            x+=l
Project: fcn    Author: wkentaro
def get_label_colortable(n_labels, shape):
    if cv2 is None:
        raise RuntimeError('get_label_colortable requires OpenCV (cv2)')
    rows, cols = shape
    if rows * cols < n_labels:
        raise ValueError
    cmap = label_colormap(n_labels)
    table = np.zeros((rows * cols, 50, 50, 3), dtype=np.uint8)
    for lbl_id, color in enumerate(cmap):
        color_uint8 = (color * 255).astype(np.uint8)
        table[lbl_id, :, :] = color_uint8
        text = '{:<2}'.format(lbl_id)
        cv2.putText(table[lbl_id], text, (5, 35),
                    cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 3)
    table = table.reshape(rows, cols, 50, 50, 3)
    table = table.transpose(0, 2, 1, 3, 4)
    table = table.reshape(rows * 50, cols * 50, 3)
    return table
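
A hypothetical call of get_label_colortable() above, assuming 21 labels laid out on a 3x7 grid of 50x50 swatches; the label count, grid shape, and output filename are placeholders.

table = get_label_colortable(21, (3, 7))  # 3 rows x 7 columns >= 21 labels
cv2.imwrite('label_colortable.png', table)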


# -----------------------------------------------------------------------------
# Evaluation
# -----------------------------------------------------------------------------
Project: canshi    Author: hungsing92
def click_and_crop(event, x, y, flags, param):
    global bbs, x_upper, id

    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            bbs.append([x,y,0,0, 0,0,0,0])
        else:
            bbs[-1][4] = x
            bbs[-1][5] = y

    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            bbs[-1][2] = abs(x - bbs[-1][0])            
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0],bbs[-1][1]), (bbs[-1][0]+bbs[-1][2],bbs[-1][1]+bbs[-1][3]), (0,0,255), 2)
            #cv2.putText(image, 'Upper %d' % id, (bbs[-1][0],bbs[-1][1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,255))
        else:
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4],bbs[-1][5]), (bbs[-1][4]+bbs[-1][6],bbs[-1][5]+bbs[-1][7]), (0,255,0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4],bbs[-1][5]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,255,0))


        cv2.imshow("image", image)        
        x_upper = not x_upper
Project: chainer-faster-rcnn    Author: mitmul
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    CV_AA = 16
    for cls_id in range(1, 21):
        _cls = clss[:, cls_id][:, np.newaxis]
        _bbx = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((_bbx, _cls))
        keep = nms(dets, nms_thresh)
        dets = dets[keep, :]

        inds = np.where(dets[:, -1] >= conf)[0]
        for i in inds:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
Project: MobileNet-SSD    Author: chuanqi305
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)

    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()  
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27: return False
    return True
Project: Gender    Author: rabeter
def draw_rects(img, rects, color):
    """
    Draw a rectangle around each detected face and label it with the predicted gender.
    :param img: input image (modified in place)
    :param rects: list of face rectangles as (x, y, w, h)
    :param color: rectangle color as a BGR tuple
    :return: None
    """
    for x, y, w, h in rects:
        face = img[y:y+h, x:x+w]  # crop the detected face region (rows first, then columns)
        face = cv2.resize(face,(224,224))
        if gender.predict(face)==1:
            text = "Male"
        else:
            text = "Female"
        cv2.rectangle(img, (x, y), (x+w, y+h), color, 2)
        cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), lineType=cv2.LINE_AA)
Project: yolo2-pytorch    Author: longcw
def draw_detection(im, bboxes, scores, cls_inds, cfg, thr=0.3):
    # draw image
    colors = cfg.colors
    labels = cfg.label_names

    imgcv = np.copy(im)
    h, w, _ = imgcv.shape
    for i, box in enumerate(bboxes):
        if scores[i] < thr:
            continue
        cls_indx = cls_inds[i]

        thick = int((h + w) / 300)
        cv2.rectangle(imgcv,
                      (box[0], box[1]), (box[2], box[3]),
                      colors[cls_indx], thick)
        mess = '%s: %.3f' % (labels[cls_indx], scores[i])
        cv2.putText(imgcv, mess, (box[0], box[1] - 12),
                    0, 1e-3 * h, colors[cls_indx], thick // 3)

    return imgcv
Project: apparent-age-gender-classification    Author: danielyou0230
def facial_landmark_detection(image, detector, predictor, file):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape
    landmark_faces = detector(gray, 1)

    faces = list()
    area = 0
    face_idx = 0
    bItr = False
    for (idx, landmark_faces) in enumerate(landmark_faces):
        shape = predictor(gray, landmark_faces)
        shape = shape_to_np(shape)
        (x, y, w, h) = rect_to_bb(landmark_faces, img_size, file)

        if (w * h) > area:
            area = w * h
            faces = [x, y, w, h]
            bItr = True
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        #cv2.putText(image, "Face #{}".format(idx + 1), (x - 10, y - 10), \
        #           cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        #for (x, y) in shape:
        #   cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    return bItr, faces
Project: FPN    Author: xmyqsh
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Project: TFFRCNN    Author: InterVideo
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Project: TFFRCNN    Author: InterVideo
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None: continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1-2, y2-2), font, 0.5, (255, 0, 255), 1)
    return img
Project: ck-tensorflow    Author: ctuning
def my_draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, label_placement='bottom'):
    assert label_placement == 'bottom' or label_placement == 'top', \
        'label_placement format not accepted: {}.'.format(label_placement)

    for bbox, label in zip(box_list, label_list):

        xmin, ymin, xmax, ymax = [int(b) for b in bbox]

        l = label.split(':')[0] # text before "CLASS: (PROB)"
        if cdict and l in cdict:
            c = cdict[l]
        else:
            c = color

        # draw box
        cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
        # draw label
        font = cv2.FONT_HERSHEY_SIMPLEX
        if label_placement == 'bottom':
            cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
        else:
            cv2.putText(im, label, (xmin, ymin), font, 0.3, c, 1)
Project: tf-openpose    Author: ildoonet
def _show(self, path, inpmat, heatmat, pafmat, humans):
        image = cv2.imread(path)

        # CocoPoseLMDB.display_image(inpmat, heatmat, pafmat)

        image_h, image_w = image.shape[:2]
        heat_h, heat_w = heatmat.shape[:2]
        for _, human in humans.items():
            for part in human:
                if part['partIdx'] not in common.CocoPairsRender:
                    continue
                center1 = (int((part['c1'][0] + 0.5) * image_w / heat_w), int((part['c1'][1] + 0.5) * image_h / heat_h))
                center2 = (int((part['c2'][0] + 0.5) * image_w / heat_w), int((part['c2'][1] + 0.5) * image_h / heat_h))
                cv2.circle(image, center1, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
                cv2.circle(image, center2, 2, (255, 0, 0), thickness=3, lineType=8, shift=0)
                cv2.putText(image, str(part['partIdx'][1]), center2, cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0), 1)
                image = cv2.line(image, center1, center2, (255, 0, 0), 1)
        cv2.imshow('result', image)
        cv2.waitKey(0)
Project: imgProcessor    Author: radjkarl
def patText(s0):
    '''make text pattern'''
    arr = np.zeros((s0,s0), dtype=np.uint8)
    s = int(round(s0/100.))
    p1 = 0
    pp1 = int(round(s0/10.))
    for pos0 in np.linspace(0,s0,10):
        cv2.putText(arr, 'helloworld', (p1,int(round(pos0))), 
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=s,
                    color=255, thickness=s,
                    lineType=cv2.LINE_AA )
        if p1:
            p1 = 0
        else:
            p1 = pp1
    return arr.astype(float)
Project: Course-Projects    Author: manujagobind
def display_shape():
    global shape
    if shape == 0:
        cv2.putText(obj, 'Off', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 1:
        cv2.putText(obj, 'Pencil', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 2:
        cv2.putText(obj, 'Brush', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 3:
        cv2.putText(obj, 'Eraser', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 4:
        cv2.putText(obj, 'Line', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 5:
        cv2.putText(obj, 'Rectangle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
    elif shape == 6:
        cv2.putText(obj, 'Circle', (50, 250), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
Project: SSD_tensorflow_VOC    Author: LevinJ
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
    shape = img.shape
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        color = colors[classes[i]]
        # Draw bounding box...
        p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
        p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
        cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
        # Draw text...
        s = '%s/%.3f' % (classes[i], scores[i])
        p1 = (p1[0]-5, p1[1])
        cv2.putText(img, s, p1[::-1], cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)


# =========================================================================== #
# Matplotlib show...
# =========================================================================== #
Project: BAR4Py    Author: bxtkezhan
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0,0,255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0,255,0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255,0,0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0,0,255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0,255,0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255,0,0), 2, cv2.LINE_AA)
Project: BAR4Py    Author: bxtkezhan
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0,0,255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0,255,0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255,0,0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0,0,255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0,255,0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255,0,0), 2, cv2.LINE_AA)
Project: BAR4Py    Author: bxtkezhan
def drawAxis(camera_parameters, markers, frame):
    axis = np.float32([[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)
    mtx, dist = camera_parameters.camera_matrix, camera_parameters.dist_coeff

    for marker in markers:
        rvec, tvec = marker.rvec, marker.tvec
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, mtx, dist)
        corners = marker.corners
        corner = tuple(corners[0].ravel())
        cv2.line(frame, corner, tuple(imgpts[0].ravel()), (0,0,255), 2)
        cv2.line(frame, corner, tuple(imgpts[1].ravel()), (0,255,0), 2)
        cv2.line(frame, corner, tuple(imgpts[2].ravel()), (255,0,0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, 'X', tuple(imgpts[0].ravel()), font, 0.5, (0,0,255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Y', tuple(imgpts[1].ravel()), font, 0.5, (0,255,0), 2, cv2.LINE_AA)
        cv2.putText(frame, 'Z', tuple(imgpts[2].ravel()), font, 0.5, (255,0,0), 2, cv2.LINE_AA)
Project: tbotnav    Author: patilnabhi
def process_image(self, inImg):
        (self.frame_width, self.frame_height) = (112, 92)        
        frame = cv2.flip(inImg,1,0)
        grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
        cropped = cv2.resize(grayImg, (grayImg.shape[1] / self.size, grayImg.shape[0] / self.size))        
        faces = self.haar_cascade.detectMultiScale(cropped)
        faces = sorted(faces, key=lambda x: x[3])  
        if faces:
            face_i = faces[0] 
            x = face_i[0] * self.size
            y = face_i[1] * self.size
            w = face_i[2] * self.size
            h = face_i[3] * self.size
            face = grayImg[y:y + h, x:x + w]
            face_resize = cv2.resize(face, (self.frame_width, self.frame_height))
            img_no = sorted([int(fn[:fn.find('.')]) for fn in os.listdir(self.path) if fn[0]!='.' ]+[0])[-1] + 1
            if self.count % self.cp_rate == 0:
                cv2.imwrite('%s/%s.png' % (self.path, img_no), face_resize)
                print "Captured Img: ", self.count/self.cp_rate + 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            cv2.putText(frame, self.face_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1,(0, 255, 0))            
            self.count += 1 
        return frame
Project: tbotnav    Author: patilnabhi
def process_image(self, inImg):
        (self.frame_width, self.frame_height) = (112, 92)        
        frame = cv2.flip(inImg,1,0)
        grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
        cropped = cv2.resize(grayImg, (grayImg.shape[1] / self.size, grayImg.shape[0] / self.size))        
        faces = self.haar_cascade.detectMultiScale(cropped)
        faces = sorted(faces, key=lambda x: x[3])  
        if faces:
            face_i = faces[0] 
            x = face_i[0] * self.size
            y = face_i[1] * self.size
            w = face_i[2] * self.size
            h = face_i[3] * self.size
            face = grayImg[y:y + h, x:x + w]
            face_resize = cv2.resize(face, (self.frame_width, self.frame_height))
            img_no = sorted([int(fn[:fn.find('.')]) for fn in os.listdir(self.path) if fn[0]!='.' ]+[0])[-1] + 1
            if self.count % self.cp_rate == 0:
                cv2.imwrite('%s/%s.png' % (self.path, img_no), face_resize)
                print "Captured Img: ", self.count/self.cp_rate + 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            cv2.putText(frame, self.face_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1,(0, 255, 0))            
            self.count += 1 
        return frame
Project: projectoxford    Author: zooba
def _renderResultOnImage(self, result, arr):
        """
            Draws boxes and text representing each face's emotion.
        """

        import operator, cv2

        img = cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            cv2.rectangle(img,(faceRectangle['left'],faceRectangle['top']),
                               (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                               color = (255,0,0), thickness = 5)

        for currFace in result:
            faceRectangle = currFace['faceRectangle']
            currEmotion = max(iter(currFace['scores'].items()), key=operator.itemgetter(1))[0]

            textToWrite = '{0}'.format(currEmotion)
            cv2.putText(img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1)

        return img
Project: OpenCV2    Author: SarathM1
def lipSegment(self, img):
        # self.t1 = cv2.getTickCount()
        lipHull = self.dlib_obj.get_landmarks(img)
        cv2.drawContours(img, lipHull, -1, (255, 0, 0), 2)
        (x, y), (MA, ma), angle = cv2.fitEllipse(lipHull)
        a = ma/2
        b = MA/2

        eccentricity = sqrt(pow(a, 2)-pow(b, 2))
        eccentricity = round(eccentricity/a, 2)

        cv2.putText(img, 'E = '+str(round(eccentricity, 3)), (10, 350),
                    self.font, 1, (255, 0, 0), 1)

        if(eccentricity < 0.9):
            self.flags.cmd = 'b'
        else:
            self.flags.cmd = 'f'

        if angle < 80:
            self.flags.cmd = 'l'
        elif angle > 100:
            self.flags.cmd = 'r'

        cv2.putText(img, 'Cmd = ' + self.flags.cmd, (10, 300),  self.font,  1,
                    (0, 0, 255), 1, 16)
        # self.t2 = cv2.getTickCount()
        # print "Time = ", (self.t2-self.t1)/cv2.getTickFrequency()
        return img
Project: Face-recognition-test    Author: jiangwei1995910
def photoRead(filename):
    frame = cv2.imread(filename)

    FaceArray = getFaceArray(frame)

    for r in FaceArray:
        img2 = cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
        img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region from the frame
        feature = Tools.get_feature(img3)
        name = readFace(feature)
        font = cv2.FONT_HERSHEY_SIMPLEX
        img2 = cv2.putText(img2, name, (r[0], r[3]), font, 1, (255, 255, 255), 2)


    cv2.imshow('frame', frame)
    cv2.waitKey(0)
Project: Face-recognition-test    Author: jiangwei1995910
def start():
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        FaceArray=getFaceArray(frame)
        img2=frame
        for r in FaceArray :
            img2=cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region from the frame
            feature=Tools.get_feature(img3)
            name=readFace(feature)
            font=cv2.FONT_HERSHEY_SIMPLEX
            img2= cv2.putText(img2,name,(r[0],r[3]), font, 1,(255,255,255),2)

        cv2.imshow('frame',img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Project: DubHacks2016    Author: joseph-zhong
def renderResultOnImage( result, img ):

  """Display the obtained results onto the input image"""

  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    cv2.rectangle( img,(faceRectangle['left'],faceRectangle['top']),
                       (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                   color = (255,0,0), thickness = 5 )


  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]


    textToWrite = "%s" % ( currEmotion )
    cv2.putText( img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1 )

# img = os.path.expanduser('~/Development/sentiEdu/learning/confusedImgs/4.jpg')
Project: DubHacks2016    Author: joseph-zhong
def renderResultOnImage( result, img ):

  """Display the obtained results onto the input image"""

  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    cv2.rectangle( img,(faceRectangle['left'],faceRectangle['top']),
                       (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                   color = (255,0,0), thickness = 5 )


  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]


    textToWrite = "%s" % ( currEmotion )
    cv2.putText( img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1 )

# img = os.path.expanduser('~/Development/sentiEdu/learning/confusedImgs/4.jpg')
Project: DubHacks2016    Author: joseph-zhong
def renderResultOnImage( result, img ):

  """Display the obtained results onto the input image"""

  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    cv2.rectangle( img,(faceRectangle['left'],faceRectangle['top']),
                       (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']),
                   color = (255,0,0), thickness = 5 )


  for currFace in result:
    faceRectangle = currFace['faceRectangle']
    currEmotion = max(currFace['scores'].items(), key=operator.itemgetter(1))[0]


    textToWrite = "%s" % ( currEmotion )
    cv2.putText( img, textToWrite, (faceRectangle['left'],faceRectangle['top']-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 1 )

# img = os.path.expanduser('~/Development/sentiEdu/learning/confusedImgs/4.jpg')
Project: faster_rcnn_logo    Author: romyny
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    l_bboxes = []    
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        print ('Det: (x_min,y_min,W,H) = ({},{},{},{}), class_name = {:s}, score = {:.3f}').format(
                int(bbox[0]),int(bbox[1]),int(bbox[2]-bbox[0]),int(bbox[3]-bbox[1]),class_name,score)
        cv2.rectangle(im, (bbox[0], bbox[3]),(bbox[2],bbox[1]), (0,255,0),2)      
        cv2.putText(im,'{:s}:{:.3f}'.format(class_name, score),
                (int(bbox[0]), int(bbox[1]) - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0),2)

        l_bboxes.append({'x_min':int(bbox[0]),'y_min':int(bbox[1]),'x_max':bbox[2],'y_max':bbox[3],'cls':class_name,'score':score})

    return l_bboxes
Project: TF_Deformable_Net    Author: Zardinality
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (x1, y1), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Project: TF_Deformable_Net    Author: Zardinality
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None: continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1-2, y2-2), font, 0.5, (255, 0, 255), 1)
    return img
Project: IoT-Client    Author: suquark
def draw_on_detected(frame, rects, timestamp):
    # Draw the bounding box on the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Status: Open", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255),
                2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # write the image to temporary file

    # t = TempImage()
    # print('File saved at' + str(t.path))
    # cv2.imwrite(t.path, frame)

    # analyze
    # pi_surveillance_analyze.analyze(t.path)
Project: GLMF203    Author: GLMF
def archive_with_items(self):
        """ Ecrit dans le dossier d'archive la frame complète avec des carrés dessinés autour
            des visages détectés
        """
        logging.info("Archive l'image avec les items trouvés...")
        # Draw a rectangle around each detected item
        for f in self.items:
            x, y, w, h = f
            cv2.rectangle(self.frame, (x,y), (x+w,y+h), (0,255,0), 3) 

        # Add the date and time to the image
        cv2.putText(self.frame, datetime.datetime.now().strftime("%c"), (5, 25), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 3) 

        # Display the image about to be archived in a window
        if self.debug:
            cv2.imshow("preview", self.frame) 
            cv2.waitKey() 

        # Write the file
        archive_full_name = "{0}_full.jpg".format(self.images_prefix)
        logging.info("Archive file is : '{0}'".format(archive_full_name))
        cv2.imwrite(os.path.join(self.archive_folder,  archive_full_name), self.frame)
Project: live-age-gender-estimator    Author: taipalma
def annotate(self, frame):
         text = "Frame rate: %.1f" % self.frameRate
         textColor = (0,255,0)
         font = cv2.FONT_HERSHEY_SIMPLEX
         size = 0.5
         thickness = 2
         textSize = cv2.getTextSize(text, font, size, thickness)
         height = textSize[1]         
         location = (0,frame.shape[0] - 4*height)
         cv2.putText(frame, text, location, font, size, textColor,
            thickness=thickness)

         text = "Detection rate: %.1f" % self.detectionRate
         location = (0,frame.shape[0] - height)
         cv2.putText(frame, text, location, font, size, textColor,
            thickness=thickness)