Python cv2 module: COLOR_BGR2GRAY example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.COLOR_BGR2GRAY.

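As a primer before the project excerpts, here is a minimal, self-contained usage sketch (the file name 'example.jpg' is a placeholder, not taken from any of the projects below): cv2.imread loads images in BGR channel order, and cv2.cvtColor with cv2.COLOR_BGR2GRAY collapses the three channels into a single-channel grayscale array.

import cv2

# OpenCV reads images in BGR channel order by default
img = cv2.imread('example.jpg')  # placeholder file name
if img is None:
    raise IOError("Could not read 'example.jpg'")

# Collapse the three BGR channels into one grayscale channel
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(img.shape, '->', gray.shape)  # e.g. (480, 640, 3) -> (480, 640)
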
Project: SelfDrivingCar    Author: aguijarro
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (7,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
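
As a follow-up, here is a hedged sketch (not part of the project) of how the returned objpoints/imgpoints typically feed cv2.calibrateCamera; the 1280x960 image size and the GOPR0032.jpg file name are assumptions for illustration only.

import cv2

objpoints, imgpoints = get_points()
# imageSize is given as (width, height); 1280x960 is an assumed resolution
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, (1280, 960), None, None)
# Undistort one calibration image with the recovered intrinsics
img = cv2.imread('calibration_wide/GOPR0032.jpg')  # hypothetical file name
undist = cv2.undistort(img, mtx, dist)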
Project: facial_emotion_recognition    Author: adamaulia
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)

    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0)
Project: SelfDrivingCar    Author: aguijarro
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: Easitter    Author: TomoyaFujita2016
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    result = image.copy()
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1,1))

    if len(faceRect) <= 0:
        return False
    else:
        # confirm face
        imageSize = image.shape[0] * image.shape[1]
        #print("d1")
        filteredFaceRects = []
        for faceR in faceRect:
            faceSize = faceR[2]*faceR[3]
            if FACE_SHAPE > min(faceR[2], faceR[3]) / max(faceR[2], faceR[3]):
                continue  # skip rectangles too elongated to be a face
            filteredFaceRects.append(faceR)

        if len(filteredFaceRects) > 0:
            return True
        else:
            return False
Project: camera_calibration_frontend    Author: groundmelon
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_cols + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners)
Project: pyku    Author: dubvulture
def __init__(self, filename, folder=None, classifier=None):
        """
        :param filename: image with sudoku
        :param folder: folder where to save debug images
        :param classifier: digit classifier
        """
        self.filename = os.path.basename(filename)
        image = cv2.imread(filename)
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        self.folder = folder or FOLDER
        os.mkdir(os.path.join(self.folder, 'debug/'))
        self.classifier = classifier or DigitClassifier()
        # Default initial values
        self.perspective = False
        self.debug = True
        self.counter = 0
        self.step = -1
Project: SudokuSolver    Author: Anve94
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)  # 0 = ADAPTIVE_THRESH_MEAN_C, 1 = THRESH_BINARY_INV
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated
Project: Mini-Projects    Author: gaborvecsei
def CaptureImage():
    imageName = 'DontCare.jpg' #Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #To capture the image in monochrome
        rgbImage = frame #Keep the frame in OpenCV's native BGR color space

        # Display the resulting frame
        cv2.imshow('Webcam',rgbImage)
        #Wait to press 'q' key for capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            #Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    #Returns the captured image's name
    return imageName
Project: imgpedia    Author: scferrada
def compute(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        descriptor = []
        dominantGradients = np.zeros_like(frame)
        maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
        maxGradient = np.absolute(maxGradient)
        for k in range(1,len(self.kernels)):
            kernel = self.kernels[k]
            gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
            gradient = np.absolute(gradient)
            np.maximum(maxGradient, gradient, maxGradient)
            indices = (maxGradient == gradient)
            dominantGradients[indices] = k

        frameH, frameW = frame.shape
        for row in range(self.rows):
            for col in range(self.cols):
                mask = np.zeros_like(frame)
                mask[((frameH//self.rows)*row):((frameH//self.rows)*(row+1)), (frameW//self.cols)*col:((frameW//self.cols)*(col+1))] = 255  # integer division keeps slice indices valid in Python 3
                hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
                hist = cv2.normalize(hist, None)
                descriptor.append(hist)
        return np.concatenate(descriptor)
Project: cvcalib    Author: Algomorph
def load_frame_images(self):
        """
        Load images (or image pairs) from self.full_frame_folder_path
        """
        print("Loading frames from '{0:s}'".format(self.full_frame_folder_path))
        all_files = [f for f in os.listdir(self.full_frame_folder_path)
                     if osp.isfile(osp.join(self.full_frame_folder_path, f)) and f.endswith(".png")]
        all_files.sort()

        usable_frame_ct = sys.maxsize

        frame_number_sets = []

        for video in self.videos:
            # assume matching numbers in corresponding left & right files
            files = [f for f in all_files if f.startswith(video.name)]
            files.sort()  # added to be explicit

            cam_frame_ct = 0
            frame_numbers = []
            for ix_pair in range(len(files)):
                frame = cv2.imread(osp.join(self.full_frame_folder_path, files[ix_pair]))
                frame_number = int(re.search(r'\d\d\d\d', files[ix_pair]).group(0))
                frame_numbers.append(frame_number)
                found, corners = cv2.findChessboardCorners(frame, self.board_dims)
                if not found:
                    raise ValueError("Could not find corners in image '{0:s}'".format(files[ix_pair]))
                grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), self.criteria_subpix)
                video.image_points.append(corners)
                video.usable_frames[frame_number] = ix_pair
                cam_frame_ct += 1
            usable_frame_ct = min(usable_frame_ct, cam_frame_ct)
            frame_number_sets.append(frame_numbers)

        if len(self.videos) > 1:
            # check that all cameras have the same frame number sets
            if len(frame_number_sets[0]) != len(frame_number_sets[1]):
                raise ValueError(
                    "There are some non-paired frames in folder '{0:s}'".format(self.full_frame_folder_path))
            for i_fn in range(len(frame_number_sets[0])):
                fn0 = frame_number_sets[0][i_fn]
                fn1 = frame_number_sets[1][i_fn]
                if fn0 != fn1:
                    raise ValueError("There are some non-paired frames in folder '{0:s}'." +
                                     " Check frame {1:d} for camera {2:s} and frame {3:d} for camera {4:s}."
                                     .format(self.full_frame_folder_path,
                                             fn0, self.videos[0].name,
                                             fn1, self.videos[1].name))

        for i_frame in range(usable_frame_ct):
            self.object_points.append(self.board_object_corner_set)
        return usable_frame_ct
Project: specularity-removal    Author: gmichaeljaison
def _resolve_spec(im1, im2):
    im = im1.copy()

    img1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    img2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)

    # Best pixel selection criteria
    #   1. Pixel difference should be more than 20. (just an experimentally value. Free to change!)
    #   2. Best pixel should have less intensity
    #   3. pixel should not be pure black. (just an additional constraint
    #       to remove black background created by warping)
    mask = np.logical_and((img1 - img2) > DIFF_THRESHOLD, img1 > img2)
    mask = np.logical_and(mask, img2 != 0)

    im[mask] = im2[mask]
    return im
Project: yonkoma2data    Author: esuji5
def homography(self, img, outdir_name=''):
        orig = img
        # Binarize: grayscale, then blur, then Canny edge detection
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # Extract contours from the binarized image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # Sort the contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # Approximate the largest contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
Project: reconstruction    Author: microelly2
def execute_Threshold(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    # img = cv2.imread('dave.jpg', 0)
    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
Project: FaceDetected    Author: ttchin
def detect_faces_from_picture(pic_file_path):
    print(">>> Let me check this picture: " + pic_file_path)
    frame = cv2.imread(pic_file_path)

    # Detect faces in the frame
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)

    # Match the detected faces with the trained model
    if len(faces) > 0:
        print(">>> Someone is in the picture!")
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            result = model.predict(face)
            for index, name in model.getTrainCfg():
                if result == index:
                    print(">>> Aha, it's %s!" % name)
Project: bib-tagger    Author: KateRita
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  binary = cv2.GaussianBlur(gray, (5, 5), 0)
  ret, binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours)
Project: imgpedia    Author: scferrada
def compute(self, frame):
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        dx = cv2.filter2D(frame, cv2.CV_32F, self.xkernel)
        dy = cv2.filter2D(frame, cv2.CV_32F, self.ykernel)
        orientations = np.zeros_like(dx)
        magnitudes = np.zeros_like(dx)
        cv2.cartToPolar(dx,dy, magnitudes,orientations)
        descriptor = []
        frameH, frameW = frame.shape
        mask_threshold = magnitudes <= self.threshold
        for row in range(self.rows):
            for col in range(self.cols):
                mask = np.zeros_like(frame)
                mask[((frameH//self.rows)*row):((frameH//self.rows)*(row+1)), (frameW//self.cols)*col:((frameW//self.cols)*(col+1))] = 1  # integer division keeps slice indices valid in Python 3
                mask[mask_threshold] = 0
                a_, b_ = mask.shape
                hist = cv2.calcHist([orientations], self.channel, mask, [self.bins], self.range)
                hist = cv2.normalize(hist, None)
                descriptor.append(hist)
        return np.concatenate(descriptor)
Project: 3DCNN    Author: bityangke
def video3d(self, filename, color=False, skip=True):
        cap = cv2.VideoCapture(filename)
        nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        if skip:
            frames = [x * nframe / self.depth for x in range(self.depth)]
        else:
            frames = [x for x in range(self.depth)]
        framearray = []

        for i in range(self.depth):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
            ret, frame = cap.read()
            frame = cv2.resize(frame, (self.height, self.width))  # note: cv2.resize expects (width, height)
            if color:
                framearray.append(frame)
            else:
                framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

        cap.release()
        return np.array(framearray)
Project: StormCV2017    Author: 2729StormRobotics
def __desaturate(src):
        """Converts a color image into shades of gray.
        Args:
            src: A color numpy.ndarray.
        Returns:
            A gray scale numpy.ndarray.
        """
        (a, b, channels) = src.shape
        if(channels == 1):
            return numpy.copy(src)
        elif(channels == 3):
            return cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        elif(channels == 4):
            return cv2.cvtColor(src, cv2.COLOR_BGRA2GRAY)
        else:
            raise Exception("Input to desaturate must have 1, 3 or 4 channels")
Project: DmsMsgRcg    Author: bshao001
def get_image_features(self, img_file, stride=5, padding=True):
        """
        Take an image file as input, and output an array of image features whose matrix size is
        based on the image size. When no padding, and the image size is smaller than the required
        feature space size (in x or y direction), the image is not checked, and this method will
        return a tuple of two empty lists; When padding is True, and the image size is more than
        4 pixels smaller than the required feature space size (in x or y direction), the image is
        not checked either. This method can be used by both the trainer and predictor.
        Args:
            img_file: The file name of the image.
            stride: Optional. The stride of the sliding.
            padding: Optional. Whether to pad the image to fit the feature space size or to
                discard the extra pixels if padding is False.
        Returns:
            coordinates: A list of coordinates, each of which contains y and x that are the top
                left corner offsets of the sliding window.
            features: A matrix (python list), in which each row contains the features of the
                sampling sliding window, while the number of rows depends on the image size of
                the input.
        """
        img = cv2.imread(img_file)
        img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        return self.get_image_array_features(img_arr, stride, padding)
Project: OCV_Vehicles_Features    Author: dan-masek
def process_frame(frame_number, frame, keypoint_data, detector, matcher):
    log = logging.getLogger("process_frame")

    # Create a copy of source frame to draw into
    processed = frame.copy()

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp, des = detector.detectAndCompute(frame, None)

    # Match descriptors
    matches = matcher.match(keypoint_data.descriptors, des)

    # Sort them in order of distance
    matches = sorted(matches, key = lambda x:x.distance)

    processed = drawMatches(cv2.imread('car.png',0), keypoint_data.keypoints, gray_frame, kp, matches[:])

    return processed

# ============================================================================
Project: ab2016-ros-gazebo    Author: akademikbilisim
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1)
Project: esys-pbi    Author: fsxfreak
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder,'marker-test.mp4'))
    markers = []
    detected_count = 0

    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        markers = detect_markers_robust(gray_img,5,prev_markers=markers,true_detect_every_frame=1,visualize=True)

        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)

        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s'%m['id'], m['img'])
        #         cv2.imshow('otsu %s'%m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
            break
        detected_count += len(markers)
    print(detected_count) #2900 #3042 #3021
Project: CE264-Computer_Vision    Author: RobinCPC
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
Project: reconstruction    Author: microelly2
def execute_BlobDetector(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im=255-im
    im2 = img

    params = cv2.SimpleBlobDetector_Params()

    params.filterByArea = True
    params.minArea = obj.Area

    params.filterByConvexity = True
    params.minConvexity = obj.Convexity/200


    # Set up the detector with default parameters.
    detector = cv2.SimpleBlobDetector_create(params)

    # Detect blobs.
    keypoints = detector.detect(im)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    if not obj.showBlobs:
        im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        obj.Proxy.img = im_with_keypoints

        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
#           cv2.circle(im,(x,y),4,0,5)
            cv2.circle(im,(x,y),4,255,5)
            cv2.circle(im,(x,y),4,0,5)
            im[y,x]=255
            im[y,x]=0
        obj.Proxy.img = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

    else:
        for k in keypoints:
            (x,y)=k.pt
            x=int(round(x))
            y=int(round(y))
            cv2.circle(im2,(x,y),4,(255,0,0),5)
            cv2.circle(im2,(x,y),4,(0,0,0),5)
            im2[y,x]=(255,0,0)
            im2[y,x]=(0,0,0)
        obj.Proxy.img = im2
Project: reconstruction    Author: microelly2
def execute_GoodFeaturesToTrack(proxy,obj):
    '''
    https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.html
    '''
    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    corners = cv2.goodFeaturesToTrack(gray,obj.maxCorners,obj.qualityLevel,obj.minDistance)
    corners = np.int0(corners)

    for i in corners:
        x,y = i.ravel()
        cv2.circle(img,(x,y),3,255,-1)

    obj.Proxy.img = img
Project: reconstruction    Author: microelly2
def animpingpong(self):
        print(self)
        print(self.Object)
        print(self.Object.Name)
        obj=self.Object
        img = cv2.imread(obj.imageFile)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray,3,3,0.00001)
        dst = cv2.dilate(dst,None)
        img[dst>0.01*dst.max()]=[0,0,255]

        from matplotlib import pyplot as plt
        plt.subplot(121),plt.imshow(img,cmap = 'gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(dst,cmap = 'gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
Project: SelfDrivingCar    Author: aguijarro
def main():
    # prepare object points
    nx = 8  # the number of inside corners in x
    ny = 6  # the number of inside corners in y

    # Make a list of calibration images
    fname = './calibration_wide/GOPR0058.jpg'
    img = cv2.imread(fname)
    plt.imshow(img)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    # If found, draw corners
    if ret == True:
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        plt.imshow(img)
        plt.show()
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper
def main():
    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)
    c=0
    last_time = time.time()
    while True:
        c+=1
        screen=grab_screen(title='')
        screenG=cv2.cvtColor(screen,cv2.COLOR_BGR2GRAY)
        screenG=cv2.resize(screenG,(80,60))
        keys=key_check()
        output=keys_to_output(keys)
        training_data.append([screenG,output])
        if c%10==0:
            print('Recording at ' + str((10 / (time.time() - last_time)))+' fps')
            last_time = time.time()

        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name,training_data)
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper
def process_img(img):
    original_image=img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    copy=processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    #                       edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
    #draw_lines(processed_img,lines)
    #draw_lines(original_image,lines)

    #Platform lines
    #imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
    ret,thresh = cv2.threshold(platform,127,255,0)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
    try:
        platformpos=contours[0][0][0]
    except:
        platformpos=[[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)

    ballpos=draw_circles(original_image,circles=circles)

    return processed_img,original_image,platform,platformpos,ballpos
Project: piwall-cvtools    Author: infinnovation
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0)
Project: piwall-cvtools    Author: infinnovation
def subtract_background(self):
        fgbg = cv2.createBackgroundSubtractorMOG2()
        prev = self.frames[0]
        fgmask = fgbg.apply(prev)
        for (i,next) in enumerate(self.frames[1:]):
            prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
            next_gray = cv2.cvtColor(next, cv2.COLOR_BGR2GRAY)
            similarity_metric = compare_ssim(prev_gray, next_gray)
            print('prev/next similarity measure = %f' % similarity_metric)
            if similarity_metric < self.transition_threshold:
                fgmask = fgbg.apply(next)
                fgdn = denoise_foreground(next, fgmask)
                self.transitions.append((1, fgdn))
            else:
                fgmask = fgbg.apply(next)
                self.transitions.append((0, None))
            prev = next.copy()
Project: bib-tagger    Author: KateRita
def find_bib(image):
  height, width, depth = image.shape

  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
  #gray = cv2.equalizeHist(gray)
  blurred = cv2.GaussianBlur(gray,(5,5),0)

  debug_output("find_bib_blurred", blurred)
  #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0);
  ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
  #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY);
  debug_output("find_bib_binary", binary)
  threshold_contours,hierarchy = find_contours(binary)

  debug_output("find_bib_threshold", binary)

  edges = cv2.Canny(gray,175,200, 3)
  edge_contours,hierarchy = find_contours(edges)

  debug_output("find_bib_edges", edges)

  contours = threshold_contours + edge_contours
  debug_output_contours("find_bib_threshold_contours", image, contours)

  rectangles = get_rectangles(contours)

  debug_output_contours("find_bib_rectangles", image, rectangles)

  potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width*height)]

  debug_output_contours("find_bib_potential_bibs", image, potential_bibs)

  ideal_aspect_ratio = 1.0
  potential_bibs = sorted(potential_bibs, key = lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))

  return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0,0)],[(0,0)],[(0,0)],[(0,0)]])

#
# Checks that the size and aspect ratio of the contour is appropriate for a bib.
#
Project: bib-tagger    Author: KateRita
def scrub(cls, image):
        """
        Apply Stroke-Width Transform to image.

        :param image: source image as a numpy array (BGR)
        :return: numpy array representing result of transform
        """

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        canny, sobelx, sobely, theta = cls._create_derivative(gray)
        swt = cls._swt(theta, canny, sobelx, sobely)
        shapes = cls._connect_components(swt)
        swts, heights, widths, topleft_pts, images = cls._find_letters(swt, shapes)
        if(len(swts)==0):
            #didn't find any text, probably a bad face
            return None

        word_images = cls._find_words(swts, heights, widths, topleft_pts, images)

        final_mask = np.zeros(swt.shape)
        for word in word_images:
            final_mask += word
        return final_mask
Project: checkmymeat    Author: kendricktan
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
Project: Yugioh-bot    Author: will7200
def read_captured_circles(self):
        img = cv2.cvtColor(self.query, cv2.COLOR_BGR2GRAY)
        img = cv2.medianBlur(img, 7)
        cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                                   param1=50, param2=30, minRadius=20, maxRadius=50)
        if circles is None:
            return
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            if i[1] < 400:
                continue
            self.circlePoints.append((i[0], i[1]))
        if self._debug:
            self.draw_circles(circles, cimg)
Project: Yugioh-bot    Author: will7200
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
Project: slide_captcha_cracker    Author: chxj1992
def predict():
    response = requests.get(slide_captcha_url)
    base64_image = response.json()['data']['dataUrl']
    base64_image_without_head = base64_image.replace('data:image/png;base64,', '')

    bytes_io = BytesIO(base64.b64decode(base64_image_without_head))
    img = np.array(Image.open(bytes_io).convert('RGB'))

    img_blur = cv2.GaussianBlur(img, (3, 3), 0)
    img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)  # note: img came from PIL as RGB, so strictly this mixes up the R and B weights
    img_canny = cv2.Canny(img_gray, 100, 200)

    operator = get_operator('shape.png')

    (x, y), _ = best_match(img_canny, operator)
    x = x + bias
    print('the position of x is', x)

    buffer = mark(img, x, y)

    return {'value': x, 'image': base64.b64encode(buffer.getbuffer()).decode()}
Project: Pedestrian-Recognition    Author: yugrocks
def __init__(self,img):

        #making two copies of the same image
        original_img = np.array(img)
        new_img = np.array(img)

        #resizing keeping the aspect ratio constant
        a_ratio = new_img.shape[0]/new_img.shape[1]
        #new_row=int(new_img.shape[0])
        new_row = 128
        new_colm = int(new_row/a_ratio)
        new_img = cv2.resize(new_img, (new_colm,new_row), interpolation = cv2.INTER_AREA)
        original_img = cv2.resize(original_img, (new_colm,new_row), interpolation = cv2.INTER_AREA)
        #convert new_one to grayscale
        new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)


        self.original_img = original_img
        self.new_img = new_img
Project: skastic    Author: mypalmike
def load(self, filename, analyze_only):
    # Load image, then do various conversions and thresholding.
    self.img_orig = cv2.imread(filename, cv2.IMREAD_COLOR)

    if self.img_orig is None:
      raise CompilerException("File '{}' not found".format(filename))

    self.img_grey = cv2.cvtColor(self.img_orig, cv2.COLOR_BGR2GRAY)
    _, self.img_contour = cv2.threshold(self.img_grey, 250, 255, cv2.THRESH_BINARY_INV)
    _, self.img_text = cv2.threshold(self.img_grey, 150, 255, cv2.THRESH_BINARY)
    self.root_node = None

    self.contours = self.find_contours()

    self.contour_lines, self.contour_nodes = self.categorize_contours()

    self.build_graph()
    self.build_parse_tree()

    self.parse_nodes()

    if not analyze_only:
      self.python_ast = self.root_node.to_python_ast()
Project: hourglasstensorlfow    Author: wbenbihi
def open_img(self, name, color = 'RGB'):
        """ Open an image 
        Args:
            name    : Name of the sample
            color   : Color Mode (RGB/BGR/GRAY)
        """
        if name[-1] in self.letter:
            name = name[:-1]
        img = cv2.imread(os.path.join(self.img_dir, name))
        if color == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return img
        elif color == 'BGR':
            return img
        elif color == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            return img
        else:
            print('Color mode supported: RGB/BGR/GRAY. If you need another mode do it yourself :p')
Project: Automatic_Group_Photography_Enhancement    Author: Yuliang-Zou
def getFaceData(img):
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Read the image
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
     )
    for (x, y, w, h) in faces:
        facedata = image[y:y+h, x:x+w]
    return facedata
Project: cvcalib    Author: Algomorph
def add_corners(self, i_frame, subpixel_criteria, frame_folder_path=None,
                    save_image=False, save_chekerboard_overlay=False):
        grey_frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        cv2.cornerSubPix(grey_frame, self.current_image_points, (11, 11), (-1, -1), subpixel_criteria)
        if save_image:
            png_path = (os.path.join(frame_folder_path,
                                     "{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
            cv2.imwrite(png_path, self.frame)
            if save_chekerboard_overlay:
                png_path = (os.path.join(frame_folder_path,
                                         "checkerboard_{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png")))
                overlay = self.frame.copy()
                cv2.drawChessboardCorners(overlay, self.current_board_dims, self.current_image_points, True)
                cv2.imwrite(png_path, overlay)
        self.usable_frames[i_frame] = len(self.image_points)
        self.image_points.append(self.current_image_points)
Project: pycalibrate    Author: reconstruct-on-the-fly
def load_images(folder_path):
    os.chdir(folder_path)
    image_files = glob.glob('*.JPG')
    print('Found %s images' % len(image_files))
    if len(image_files) == 0:
        return

    images = []
    print('Loading images ', end='')
    for file in image_files:
        image = cv.imread(file)
        gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        images.append((gray_image, image))
        print('.', end='')
        sys.stdout.flush()

    print('')
    return images
Project: CanLauncher    Author: hazenhamather
def scanForFace():
    while 1:
        #This is where we will scan back and forth hunting for a face. We will
        #begin turning the turret and scanning at the same time.

        #Turn Servo one click right and start back other way when we max out

        #Do a cap.read() command
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.3,
            minNeighbors = 5
        )
        for (x,y,w,h) in faces:
            foundFace = True
            aimToFace()
Project: CanLauncher    Author: hazenhamather
def aimToFace():
    while 1:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor = 1.3,
            minNeighbors = 5
        )
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
            distance = 146.645*math.exp(-7.207e-3*w)
            # print(distance)
            if x < (halfScreen - 1.5*w):
                #click servo right
                print("Pan Right")
            elif x > (halfScreen + 1.5*w):
                #Click servo left
                print("Pan Left")
            else:
                targetConfirmed = confirmTarget()
                if targetConfirmed:
                    Launch(distance)
                else:
                    break
Project: apparent-age-gender-classification    Author: danielyou0230
def facial_landmark_detection(image, detector, predictor, file):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    img_size = gray.shape
    landmark_faces = detector(gray, 1)

    faces = list()
    area = 0
    face_idx = 0
    bItr = False
    for (idx, landmark_faces) in enumerate(landmark_faces):
        shape = predictor(gray, landmark_faces)
        shape = shape_to_np(shape)
        (x, y, w, h) = rect_to_bb(landmark_faces, img_size, file)

        if (w * h) > area:
            area = w * h
            faces = [x, y, w, h]
            bItr = True
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        #cv2.putText(image, "Face #{}".format(idx + 1), (x - 10, y - 10), \
        #           cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        #for (x, y) in shape:
        #   cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

    return bItr, faces
Project: apparent-age-gender-classification    Author: danielyou0230
def debug_face_classifier(file):
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    image = cv2.imread(file)

    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(image, 1.07, 3)
    print(faces)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #roi_gray = gray[y:y+h, x:x+w]
        #roi_color = image[y:y+h, x:x+w]

    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: FacePoseEstimation    Author: abhisharma7
def image(self):

        img = cv2.imread(self.image_path)
        img = imutils.resize(img,width=min(800,img.shape[1]))
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray,(21,21),0)
        fullbody = self.HogDescriptor(gray)
        for (x,y,w,h) in fullbody:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)

        faces = self.haar_facedetection(gray)
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = self.haar_eyedetection(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0),2) 
            smile = self.haar_smilecascade(roi_gray)
            for (sx,sy,sw,sh) in smile:
                cv2.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh),(0,255,0),2)
        img = self.dlib_function(img)
        cv2.imshow('img',img)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
Project: AutonomousParking    Author: jovanduy
def overlay_img(self):
        """Overlay the transparent, transformed image of the arc onto our CV image"""
        #overlay the arc on the image
        rows, cols, channels = self.transformed.shape
        roi = self.cv_image[0:rows, 0:cols]

        #change arc_image to grayscale
        arc2gray = cv2.cvtColor(self.transformed, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(arc2gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)

        #black out area of arc in ROI
        img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
        img2_fg = cv2.bitwise_and(self.transformed, self.transformed, mask=mask)

        #put arc on ROI and modify the main image
        dst = cv2.add(img1_bg, img2_fg)
        self.cv_image[0:rows, 0:cols] = dst
Project: SOLAMS    Author: aishmittal
def display_video_stream(self):
        r, frame = self.capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(40, 40),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

        frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        frame = cv2.flip(frame, 1)
        image = QImage(frame, frame.shape[1], frame.shape[0], 
                       frame.strides[0], QImage.Format_RGB888)

        self.imageLabel.setPixmap(QPixmap.fromImage(image))