Python cv2 module: cvtColor() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.cvtColor().

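Before the project examples, a minimal usage sketch for reference (an illustrative snippet, not taken from any project below; 'example.jpg' is an assumed filename):

import cv2

img = cv2.imread('example.jpg')               # OpenCV loads images in BGR channel order
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # BGR -> single-channel grayscale
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)    # BGR -> HSV, useful for color-range masking
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # BGR -> RGB, e.g. before displaying with matplotlib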
Project: Speedy-TSLSR    Author: talhaHavadar
def __bound_contours(roi):
    """
        Returns a modified copy of the roi (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # keep the five largest contours by area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)

    return (roi_copy, rects)
Project: SelfDrivingCar    Author: aguijarro
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1 , 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
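
The collected correspondences are what cv2.calibrateCamera consumes; a minimal follow-up sketch (the image size below is an assumed placeholder, not taken from the project):

objpoints, imgpoints = get_points()
# Compute the camera matrix and distortion coefficients from the point correspondences
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, (1280, 960), None, None)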
Project: facial_emotion_recognition    Author: adamaulia
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)

    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0)
Project: SelfDrivingCar    Author: aguijarro
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: Easitter    Author: TomoyaFujita2016
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    result = image.copy()
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1,1))

    if len(faceRect) <= 0:
        return False
    else:
        # confirm face
        imageSize = image.shape[0] * image.shape[1]
        #print("d1")
        filteredFaceRects = []
        for faceR in faceRect:
            faceSize = faceR[2]*faceR[3]
            if FACE_SHAPE > min(faceR[2], faceR[3]) / float(max(faceR[2], faceR[3])):
                # skip overly elongated detections and keep checking the remaining faces
                continue
            filteredFaceRects.append(faceR)

        if len(filteredFaceRects) > 0:
            return True
        else:
            return False
Project: Deep360Pilot-optical-flow    Author: yenchenlin
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison if we have matlab version image
Project: camera_calibration_frontend    Author: groundmelon
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col  # corners are stored row-major with n_cols per row
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners)
Project: pyku    Author: dubvulture
def __init__(self, filename, folder=None, classifier=None):
        """
        :param filename: image with sudoku
        :param folder: folder where to save debug images
        :param classifier: digit classifier
        """
        self.filename = os.path.basename(filename)
        image = cv2.imread(filename)
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        self.folder = folder or FOLDER
        os.mkdir(os.path.join(self.folder, 'debug/'))
        self.classifier = classifier or DigitClassifier()
        # Default initial values
        self.perspective = False
        self.debug = True
        self.counter = 0
        self.step = -1
Project: SudokuSolver    Author: Anve94
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated
Project: Mini-Projects    Author: gaborvecsei
def CaptureImage():
    imageName = 'DontCare.jpg' #Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # to capture the image in monochrome
        rgbImage = frame # keep the raw color frame (note: OpenCV frames are BGR, not RGB)

        # Display the resulting frame
        cv2.imshow('Webcam',rgbImage)
        # Wait for the 'q' key to be pressed before capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            #Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    #Returns the captured image's name
    return imageName
Project: specularity-removal    Author: gmichaeljaison
def _resolve_spec(im1, im2):
    im = im1.copy()

    img1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    img2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)

    # Best pixel selection criteria
    #   1. Pixel difference should be more than 20 (just an experimental value; feel free to change!)
    #   2. Best pixel should have less intensity
    #   3. pixel should not be pure black. (just an additional constraint
    #       to remove black background created by warping)
    mask = np.logical_and((img1 - img2) > DIFF_THRESHOLD, img1 > img2)
    mask = np.logical_and(mask, img2 != 0)

    im[mask] = im2[mask]
    return im
Project: yonkoma2data    Author: esuji5
def homography(self, img, outdir_name=''):
        orig = img
        # Binarize: grayscale, blur, then Canny edge detection
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # Extract contours from the binarized image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # Sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # Approximate the largest contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
Project: esys-pbi    Author: fsxfreak
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe")
Project: reconstruction    Author: microelly2
def execute_Threshold(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    # img = cv2.imread('dave.jpg', 0)
    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
Project: FaceDetected    Author: ttchin
def detect_faces_from_picture(pic_file_path):
    print(">>> Let me check this picture: " + pic_file_path)
    frame = cv2.imread(pic_file_path)

    # Detect faces in the frame
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)

    # Match the detected faces with the trained model
    if len(faces) > 0:
        print(">>> Someone is in the picture!")
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            result = model.predict(face)
            for index, name in model.getTrainCfg():
                if result == index:
                    print(">>> Aha, it's %s!" % name)
Project: chainer-cyclegan    Author: Aixile
def get_example(self, i):
        id = self.all_keys[i]
        img = None
        val = self.db.get(id.encode())

        img = cv2.imdecode(np.fromstring(val, dtype=np.uint8), 1)
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color
Project: pyku    Author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
Project: RunescapeBots    Author: lukegarbutt
def print_img_array(self):
        img = self.take_screenshot('array')
        #converts image to HSV 
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # gets the values from the sliders
        low_hue = self.low_hue.get()
        low_sat = self.low_sat.get()
        low_val = self.low_val.get()
        # gets upper values from sliders
        high_hue = self.high_hue.get()
        high_sat = self.high_sat.get()
        high_val = self.high_val.get()
        lower_color = np.array([low_hue,low_sat,low_val]) 
        upper_color= np.array([high_hue,high_sat,high_val])
        #creates the mask and result
        mask = cv2.inRange(img, lower_color, upper_color)  # img was converted to HSV above
        mask = np.array(mask)
        print(mask)


# Instance of Tkinter
Project: robik    Author: RecunchoMaker
def get_color_medio(self, roi, a,b,imprimir = False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200

        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0]<12 and std[1]<12 and std[2]<12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl,3), axis=0)
            # yellow has saturation around 65 and value above 200
            if media[1]<60: #and (abs(media[0]-30)>10):
                # white
                return [-10,0,0]
            else:
                return media
        else:
            return None
Project: yolo_tensorflow    Author: hizhangp
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result
Project: guided-filter    Author: lisabug
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e))
Project: pybot    Author: spillai
def colormap(im, min_threshold=0.01):
    mask = im<min_threshold
    if im.ndim == 1: 
        print im
        hsv = np.zeros((len(im), 3), dtype=np.uint8)
        hsv[:,0] = (im * 180).astype(np.uint8)
        hsv[:,1] = 255
        hsv[:,2] = 255
        bgr = cv2.cvtColor(hsv.reshape(-1,1,3), cv2.COLOR_HSV2BGR).reshape(-1,3)
        bgr[mask] = 0
    else: 
        hsv = np.zeros((im.shape[0], im.shape[1], 3), np.uint8)
        hsv[...,0] = (im * 180).astype(np.uint8)
        hsv[...,1] = 255
        hsv[...,2] = 255
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        bgr[mask] = 0
    return bgr
Project: pybot    Author: spillai
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
Project: Tensormodels    Author: asheshjain399
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation with a constant and clips the value between [0,1.0]
    Args:
        img: input image in float32
        label: passed through and returned unchanged
        lower: lower val for sampling
        upper: upper val for sampling
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # saturation should always be within [0,1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
Project: Tensormodels    Author: asheshjain399
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel

    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360

    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
Project: DmsMsgRcg    Author: bshao001
def get_image_features(self, img_file, stride=5, padding=True):
        """
        Take an image file as input, and output an array of image features whose matrix size is
        based on the image size. When no padding, and the image size is smaller than the required
        feature space size (in x or y direction), the image is not checked, and this method will
        return a tuple of two empty lists; When padding is True, and the image size is more than
        4 pixels smaller than the required feature space size (in x or y direction), the image is
        not checked either. This method can be used by both the trainer and predictor.
        Args:
            img_file: The file name of the image.
            stride: Optional. The stride of the sliding.
            padding: Optional. Whether to pad the image to fit the feature space size or to
                discard the extra pixels if padding is False.
        Returns:
            coordinates: A list of coordinates, each of which contains y and x that are the top
                left corner offsets of the sliding window.
            features: A matrix (python list), in which each row contains the features of the
                sampling sliding window, while the number of rows depends on the image size of
                the input.
        """
        img = cv2.imread(img_file)
        img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        return self.get_image_array_features(img_arr, stride, padding)
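
The delegated helper get_image_array_features is not shown in this snippet; a minimal sketch of the sliding-window scan described by the docstring could look like this (the window-size parameters are assumptions, not from the project):

def sliding_window_offsets(img_arr, win_h, win_w, stride=5):
    """Yield the (y, x) top-left offsets of each sliding window over the image."""
    h, w = img_arr.shape[:2]
    for y in range(0, h - win_h + 1, stride):
        for x in range(0, w - win_w + 1, stride):
            yield y, x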
Project: OCV_Vehicles_Features    Author: dan-masek
def process_frame(frame_number, frame, keypoint_data, detector, matcher):
    log = logging.getLogger("process_frame")

    # Create a copy of source frame to draw into
    processed = frame.copy()

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp, des = detector.detectAndCompute(frame, None)

    # Match descriptors
    matches = matcher.match(keypoint_data.descriptors, des)

    # Sort them in order of distance
    matches = sorted(matches, key = lambda x:x.distance)

    processed = drawMatches(cv2.imread('car.png',0), keypoint_data.keypoints, gray_frame, kp, matches[:])

    return processed

# ============================================================================
Project: Vision2016    Author: Team3309
def check_image(name):
    expected_data = json.loads(open('./img/' + name + '.json').read())
    if not expected_data['enabled']:
        return

    expected_targets = expected_data['targets']

    img = cv2.imread('./img/' + name + '.jpg', cv2.IMREAD_COLOR)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    args = config.copy()
    args['img'] = hsv
    args['output_images'] = {}

    actual_targets = find(**args)

    # make sure same number of targets are detected
    assert len(expected_targets) == len(actual_targets)

    # targets is a list of 2-tuples with expected and actual results
    targets = zip(expected_targets, actual_targets)
    # compare all the different features of targets to make sure they match
    for pair in targets:
        expected, actual = pair
        # make sure that the targets are close to where they are supposed to be
        assert is_close(expected['pos']['x'], actual['pos']['x'], 0.02)
        assert is_close(expected['pos']['y'], actual['pos']['y'], 0.02)
        # make sure that the targets are close to the size they are supposed to be
        assert is_close(expected['size']['width'], actual['size']['width'], 0.02)
        assert is_close(expected['size']['height'], actual['size']['height'], 0.02)
Project: Vision2016    Author: Team3309
def handle_image(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    new_data_condition.acquire()
    state['img'] = hsv
    args = config['target'].copy()
    args['img'] = hsv
    args['draw_output'] = state['draw_output']
    args['output_images'] = {}

    targets = vision.find(**args)
    state['targets'] = targets
    state['output_images'] = args['output_images']
    new_data_condition.notify_all()
    new_data_condition.release()

    fps, processing_time = update_fps()
    state['fps'] = round(fps, 1)
    print 'Processed in', processing_time, 'ms, max fps =', round(fps_smoothed, 1)
Project: speed    Author: keon
def optical_flow(one, two):
    """
    method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
    """
    one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
    two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))
    # set saturation
    hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
    # obtain dense optical flow parameters
    flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
                                        pyr_scale=0.5, levels=1, winsize=15,
                                        iterations=2,
                                        poly_n=5, poly_sigma=1.1, flags=0)
    # convert from cartesian to polar
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # hue corresponds to direction
    hsv[:,:,0] = ang * (180/ np.pi / 2)
    # value corresponds to magnitude
    hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # cast the HSV image to float32 before converting color space
    hsv = np.asarray(hsv, dtype= np.float32)
    rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
    return rgb_flow
Project: STS-PiLot    Author: mark-orion
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while camtest == False:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: structured-output-ae    Author: sbelharbi
def plot_over_img(self, img, x, y, x_pr, y_pr, bb_gt):
        """Plot the landmarks over the image with the bbox."""
        plt.close("all")
        fig = plt.figure(frameon=False)  # , figsize=(15, 10.8), dpi=200
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), aspect="auto")
        ax.scatter(x, y, s=10, color='r')
        ax.scatter(x_pr, y_pr, s=10, color='g')
        rect = patches.Rectangle(
            (bb_gt[0], bb_gt[1]), bb_gt[2]-bb_gt[0], bb_gt[3]-bb_gt[1],
            linewidth=1, edgecolor='b', facecolor='none')
        ax.add_patch(rect)
        fig.add_axes(ax)

        return fig
Project: ab2016-ros-gazebo    Author: akademikbilisim
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1)
Project: PixivAvatarBot    Author: kophy
def generate_avatar(dir, filename):
    """
    ????????????dir/avatar_filename
    :return: ?????????bool?
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)));
    image = None;
    try:
        image = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR);
    except:
        image = numpy.array(pil_image);
    avatar = crop_avatar(image);
    if avatar is None:
        return False;
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar);
        return True;
Project: watermark    Author: lishuaijuly
def embed(self,ori_img, wm, key=10):
        B =  ori_img
        if len(ori_img.shape ) > 2 :
            img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2YUV)
            signature = BlindWatermark._gene_signature(wm,256,key).flatten()
            B= img[:,:,0]

        w,h = B.shape[:2]
        if w< 64 or h <64 :
            print('Image must be at least 64x64 pixels; cannot embed the watermark.')
            return ori_img

        if len(ori_img.shape ) > 2 :
            img[:,:,0] = self.inner_embed(B,signature)  
            ori_img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)
        else :
            ori_img = B
        return ori_img
Project: traffic_detection_yolo2    Author: wAuner
def create_heatmaps(img, pred):
    """
    Uses objectness probability to draw a heatmap on the image and returns it
    """
    # find anchors with highest prediction
    best_pred = np.max(pred[..., 0], axis=-1)
    # convert probabilities to colormap scale
    best_pred = np.uint8(best_pred * 255)
    # apply color map
    # cv2 colormaps create BGR, not RGB
    cmap = cv2.cvtColor(cv2.applyColorMap(best_pred, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
    # resize the color map to fit image
    cmap = cv2.resize(cmap, img.shape[1::-1], interpolation=cv2.INTER_NEAREST)

    # overlay cmap with image
    return cv2.addWeighted(cmap, 1, img, 0.5, 0)
Project: YOLO-Object-Detection-Tensorflow    Author: huseinzol05
def detect(img):
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (settings.image_size, settings.image_size))
    inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) * 2.0 - 1.0
    inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
    result = detect_from_cvmat(inputs)[0]
    print result

    for i in range(len(result)):
        result[i][1] *= (1.0 * img_w / settings.image_size)
        result[i][2] *= (1.0 * img_h / settings.image_size)
        result[i][3] *= (1.0 * img_w / settings.image_size)
        result[i][4] *= (1.0 * img_h / settings.image_size)

    return result
Project: CE264-Computer_Vision    Author: RobinCPC
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
Project: kaggle-dstl-satellite-imagery-feature-detection    Author: u1234x1234
def get_data(image_id, a_size, m_size, p_size, sf):
    rgb_data = get_rgb_data(image_id)
    rgb_data = cv2.resize(rgb_data, (p_size*sf, p_size*sf),
                          interpolation=cv2.INTER_LANCZOS4)

#    rgb_data = rgb_data.astype(np.float) / 2500.
#    print(np.max(rgb_data), np.mean(rgb_data))

#    rgb_data[:, :, 0] = exposure.equalize_adapthist(rgb_data[:, :, 0], clip_limit=0.04)
#    rgb_data[:, :, 1] = exposure.equalize_adapthist(rgb_data[:, :, 1], clip_limit=0.04)
#    rgb_data[:, :, 2] = exposure.equalize_adapthist(rgb_data[:, :, 2], clip_limit=0.04)    

    A_data = get_spectral_data(image_id, a_size*sf, a_size*sf, bands=['A'])
    M_data = get_spectral_data(image_id, m_size*sf, m_size*sf, bands=['M'])
    P_data = get_spectral_data(image_id, p_size*sf, p_size*sf, bands=['P'])

#    lab_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2LAB)
    P_data = np.concatenate([rgb_data, P_data], axis=2)

    return A_data, M_data, P_data
Project: face_detection    Author: chintak
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
Project: PiWifiCam    Author: mark-orion
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while camtest == False:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls.last_access > 10:
                break
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,opening)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=opening
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
        edges = cv2.Canny(img,obj.minVal,obj.maxVal)
        color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        edges=color

        if True:
            print "zeige"
            cv2.imshow(obj.Label,edges)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=edges
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,closing)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(dst,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=closing
Project: reconstruction    Author: microelly2
def execute_BlobDetector(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im=255-im
    im2 = img

    params = cv2.SimpleBlobDetector_Params()

    params.filterByArea = True
    params.minArea = obj.Area

    params.filterByConvexity = True
    params.minConvexity = obj.Convexity/200


    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints

        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
#           cv2.circle(im,(x,y),4,0,5)
            cv2.circle(im,(x,y),4,255,5)
            cv2.circle(im,(x,y),4,0,5)
            im[y,x]=255
            im[y,x]=0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

    else:
        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
            cv2.circle(im2,(x,y),4,(255,0,0),5)
            cv2.circle(im2,(x,y),4,(0,0,0),5)
            im2[y,x]=(255,0,0)
            im2[y,x]=(0,0,0)
        obj.Proxy.img = im2
Project: reconstruction    Author: microelly2
def execute_ColorSpace(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')
    hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)

    lower = np.array([max(obj.h1-obj.h2,0),max(obj.s1-obj.s2,0),max(obj.v1-obj.v2,0)])
    upper = np.array([min(obj.h1+obj.h2,255),min(obj.s1+obj.s2,255),min(obj.v1+obj.v2,255)])
    say("ee")
    say(lower)
    say(upper)

    mask = cv2.inRange(hsv, lower, upper)

    res = cv2.bitwise_and(img,img, mask= mask)

    obj.Proxy.img=res
Project: reconstruction    Author: microelly2
def execute_GoodFeaturesToTrack(proxy,obj):
    '''
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
    '''
    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    corners = cv2.goodFeaturesToTrack(gray,obj.maxCorners,obj.qualityLevel,obj.minDistance)
    corners = np.int0(corners)

    for i in corners:
        x,y = i.ravel()
        cv2.circle(img,(x,y),3,255,-1)

    obj.Proxy.img = img
Project: reconstruction    Author: microelly2
def execute_HSV(proxy,obj):

    say("hsv ..")

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    lower=np.array([obj.valueColor-obj.deltaColor,0,0])
    upper=np.array([obj.valueColor+obj.deltaColor,255,255])
    mask = cv2.inRange(hsv, lower, upper)

    res = cv2.bitwise_and(hsv,hsv, mask= mask)

    obj.Proxy.img=res
Project: reconstruction    Author: microelly2
def animpingpong(self):
        print self
        print self.Object
        print self.Object.Name
        obj=self.Object
        img = cv2.imread(obj.imageFile)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray,3,3,0.00001)
        dst = cv2.dilate(dst,None)
        img[dst>0.01*dst.max()]=[0,0,255]

        from matplotlib import pyplot as plt
        plt.subplot(121),plt.imshow(img,cmap = 'gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(dst,cmap = 'gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Project: Face-Recognition    Author: irmowan
def img_process(im, landmark, print_img=False):
    """
    Image processing, rotate, resize, and crop the face image.

    Args:
        im: numpy array, Original image
        landmark: 5 landmark points

    Return:
        Crop face region
    """
    if landmark is None:
        im_rez = cv2.resize(im, (cfg.crop_size, cfg.crop_size))
        return im_rez
    im_rot, ang, r_landmark = im_rotate(im, landmark)
    im_rez, resize_scale, rez_landmark = im_resize(im_rot, r_landmark, ang)
    crop = im_crop(im_rez, rez_landmark, resize_scale)
    if cfg.forcegray == True:
        crop = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
    # print('Shapes' + str(im_rot.shape) + str(im_rez.shape) + str(crop.shape))
    # return im_rot, im_rez, crop, (crop.astype(np.float) - cfg.PIXEL_MEANS) / cfg.scale
    if print_img:
        return im, im_rot, im_rez, crop
    return crop