Python cv2 module: RETR_LIST code examples

We extracted the following 42 code examples from open-source Python projects to illustrate how cv2.RETR_LIST is used.
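
Before the project snippets, here is a minimal self-contained sketch (not taken from any project below): cv2.RETR_LIST retrieves every contour as a flat list with no parent/child hierarchy, and the number of values cv2.findContours returns differs between OpenCV releases, which is why the snippets below unpack it in several different ways.

import cv2
import numpy as np

def find_all_contours(binary_image):
    # RETR_LIST: all contours in a flat list; CHAIN_APPROX_SIMPLE: straight runs kept as end points only
    result = cv2.findContours(binary_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy)
    return result[-2]

canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (80, 80), 255, -1)  # one filled white square
print(len(find_all_contours(canvas)))               # -> 1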

Project: Speedy-TSLSR    Author: talhaHavadar
def __bound_contours(roi):
    """
        returns a modified copy of the roi (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # keep the five largest contours by area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1);

    return (roi_copy, rects)
Project: PaperHelper    Author: EdgarNg1024
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Project: object-detection-python-opencv    Author: hasanaliqureshi
def find_biggest_contour(image):
    # Copy
    image = image.copy()
    # RETR_LIST retrieves all of the contours; CHAIN_APPROX_SIMPLE compresses horizontal,
    # vertical, and diagonal segments and leaves only their end points. For example,
    # an up-right rectangular contour is encoded with 4 points.
    # The optional hierarchy output describes the image topology and has as many
    # elements as there are contours; we don't need it here.
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Isolate largest contour
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]

    mask = np.zeros(image.shape, np.uint8)
    cv2.drawContours(mask, [biggest_contour], -1, 255, -1)
    return biggest_contour, mask
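The comments above paraphrase the OpenCV documentation: with cv2.CHAIN_APPROX_SIMPLE an up-right rectangle is stored as just its four corner points, while cv2.CHAIN_APPROX_NONE keeps every boundary pixel. A short sketch (not part of the project) that makes the difference visible:

import cv2
import numpy as np

canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (10, 10), (90, 60), 255, -1)  # filled, axis-aligned rectangle

# [-2] selects the contour list whether findContours returns two or three values
simple = cv2.findContours(canvas.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
full = cv2.findContours(canvas.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]

print(len(simple[0]))  # 4 points: only the corners are kept
print(len(full[0]))    # every boundary pixel of the rectangle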
Project: idmatch    Author: maddevsio
def remove_borders(image):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edged', edged)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx) == 4)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is not None and len(screenCnt) > 0:
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig
Project: beryl    Author: DanielJDufour
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        if (1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07):
                            squares.append(cnt)
    return squares
Project: yonkoma2data    Author: esuji5
def homography(self, img, outdir_name=''):
        orig = img
        # preprocessing: grayscale -> Gaussian blur -> Canny edges
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # find contours in the edge image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # approximate the largest contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
Project: piwall-cvtools    Author: infinnovation
def find_squares(img, cos_limit = 0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < cos_limit :
                        squares.append(cnt)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares

###
### Version V2.  Collect meta-data along the way,  with commentary added.
###
Project: 2017-robot    Author: frc1418
def find_contours(self, img):

        thresh_img = self.threshold(img)

        _, contours, _ = cv2.findContours(thresh_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        result = []
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)

            if self.draw_approx:
                cv2.drawContours(self.out, [approx], -1, self.BLUE, 2, lineType=8)

            if len(approx) > 3 and len(approx) < 15:
                _, _, w, h = cv2.boundingRect(approx)
                if h > self.min_height and w > self.min_width:
                        hull = cv2.convexHull(cnt)
                        approx2 = cv2.approxPolyDP(hull,0.01*cv2.arcLength(hull,True),True)

                        if self.draw_approx2:
                            cv2.drawContours(self.out, [approx2], -1, self.GREEN, 2, lineType=8)

                        result.append(approx2)
        return result
Project: DoNotSnap    Author: AVGInnovationLabs
def findEllipses(edges):
    contours, _ = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    ellipseMask = np.zeros(edges.shape, dtype=np.uint8)
    contourMask = np.zeros(edges.shape, dtype=np.uint8)

    pi_4 = np.pi * 4

    for i, contour in enumerate(contours):
        if len(contour) < 5:
            continue

        area = cv2.contourArea(contour)
        if area <= 100:  # skip ellipses smaller than 10x10
            continue

        arclen = cv2.arcLength(contour, True)
        circularity = (pi_4 * area) / (arclen * arclen)
        ellipse = cv2.fitEllipse(contour)
        poly = cv2.ellipse2Poly((int(ellipse[0][0]), int(ellipse[0][1])), (int(ellipse[1][0] / 2), int(ellipse[1][1] / 2)), int(ellipse[2]), 0, 360, 5)

        # if contour is circular enough
        if circularity > 0.6:
            cv2.fillPoly(ellipseMask, [poly], 255)
            continue

        # if contour has enough similarity to an ellipse
        similarity = cv2.matchShapes(poly.reshape((poly.shape[0], 1, poly.shape[1])), contour, cv2.cv.CV_CONTOURS_MATCH_I2, 0)
        if similarity <= 0.2:
            cv2.fillPoly(contourMask, [poly], 255)

    return ellipseMask, contourMask
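The circularity test above uses the isoperimetric ratio 4πA/P², which equals 1 for an ideal circle and π/4 ≈ 0.785 for a square, so low values indicate elongated shapes. A quick sanity check (not part of the project; rasterization pulls the circle's score a little below the ideal value):

import math

import cv2
import numpy as np

canvas = np.zeros((200, 400), dtype=np.uint8)
cv2.circle(canvas, (100, 100), 60, 255, -1)             # filled circle
cv2.rectangle(canvas, (240, 40), (360, 160), 255, -1)   # filled square

contours = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]
for contour in contours:
    area = cv2.contourArea(contour)
    arclen = cv2.arcLength(contour, True)
    # expected: the circle scores noticeably higher (ideal 1.0) than the square (ideal pi/4)
    print(4 * math.pi * area / (arclen * arclen))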
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.1, contour_range=7):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]
    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)
        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        raise EdgeNotFound()
    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])
    return screenCnt
Project: piwall-cvtools    Author: infinnovation
def cannyThresholding(self, contour_retrieval_mode = cv2.RETR_LIST):
        '''
        contour_retrieval_mode is passed through as second argument to cv2.findContours
        '''

        # Attempt to match edges found in blue, green or red channels : collect all
        channel = 0
        for gray in cv2.split(self.img):
            channel += 1
            print('channel %d ' % channel)
            title = self.tgen.next('channel-%d' % channel)
            if self.show: ImageViewer(gray).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
            found = {}
            for thrs in xrange(0, 255, 26):
                print('Using threshold %d' % thrs)
                if thrs == 0:
                    print('First step')
                    bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                    title = self.tgen.next('canny-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                    bin = cv2.dilate(bin, None)
                    title = self.tgen.next('canny-dilate-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                else:
                    retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                    title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                    if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy = self.destroy, info = self.info, thumbnailfn = title)
                bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
                title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                    filteredContours = contours
                else:
                    filteredContours = []
                    h = hierarchy[0]
                    for component in zip(contours, h):
                        currentContour = component[0]
                        currentHierarchy = component[1]
                        if currentHierarchy[3] < 0:
                            # Found the outermost parent component
                            filteredContours.append(currentContour)
                    print('Contours filtered.   Input %d  Output %d' % (len(contours), len(filteredContours)))
                    time.sleep(5)
                for cnt in filteredContours:
                    cnt_len = cv2.arcLength(cnt, True)
                    cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    cnt_len = len(cnt)
                    cnt_area = cv2.contourArea(cnt)
                    cnt_isConvex = cv2.isContourConvex(cnt)
                    if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max)  and cnt_isConvex:
                        cnt = cnt.reshape(-1, 2)
                        max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                        if max_cos < self.cos_limit :
                            sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                            self.squares.append(sq)
                        else:
                            #print('dropped a square with max_cos %f' % max_cos)
                            pass
                found[thrs] = len(self.squares)
                print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
Project: gaps    Author: nemanja-m
def _find_size_candidates(self, image):
        binary_image = self._filter_image(image)

        _, contours, _ = cv2.findContours(binary_image,
                                          cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)

        size_candidates = []
        for contour in contours:
            bounding_rect = cv2.boundingRect(contour)
            contour_area = cv2.contourArea(contour)
            if self._is_valid_contour(contour_area, bounding_rect):
                candidate = (bounding_rect[2] + bounding_rect[3]) / 2
                size_candidates.append(candidate)

        return size_candidates
Project: histonets-cv    Author: sul-cidr
def remove_blobs(image, min_area=0, max_area=sys.maxsize, threshold=128,
                 method='8-connected', return_mask=False):
    """Binarize image using threshold, and remove (turn into black)
    blobs of connected pixels of white of size bigger or equal than
    min_area but smaller or equal than max_area from the original image,
    returning it afterward."""
    method = method.lower()
    if method == '4-connected':
        method = cv2.LINE_4
    elif method in ('16-connected', 'antialiased'):
        method = cv2.LINE_AA
    else:  # 8-connected
        method = cv2.LINE_8
    mono_image = binarize_image(image, method='boolean', threshold=threshold)
    _, all_contours, _ = cv2.findContours(mono_image, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    contours = np.array([contour for contour in all_contours
                         if min_area <= cv2.contourArea(contour) <= max_area])
    mask = np.ones(mono_image.shape, np.uint8)
    cv2.drawContours(mask, contours, -1, 0, -1, lineType=method)
    return image, 255 * mask
Project: pictureflow    Author: mentum
def apply(self, item, threshold):

        img = item.img_mat

        img[img > 0] = 255
        _, contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        valid_contours = []

        for contour in contours:
            max_x = contour[:, :, 0].max()
            min_x = contour[:, :, 0].min()

            max_y = contour[:, :, 1].max()
            min_y = contour[:, :, 1].min()

            if (max_x - min_x) * (max_y - min_y) > threshold:
                valid_contours.append(contour)

        yield valid_contours
Project: AMBR    Author: Algomorph
def draw_silhouette(self, foreground, bin_mask, tracked_object_stats, centroid):
        contours = cv2.findContours(bin_mask, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)[1]
        for i_contour in range(0, len(contours)):
            cv2.drawContours(foreground, contours, i_contour, (0, 255, 0))
        x1 = tracked_object_stats[cv2.CC_STAT_LEFT]
        x2 = x1 + tracked_object_stats[cv2.CC_STAT_WIDTH]+1
        y1 = tracked_object_stats[cv2.CC_STAT_TOP]
        y2 = y1 + tracked_object_stats[cv2.CC_STAT_HEIGHT]+1
        if SilhouetteExtractor.DRAW_BBOX:
            cv2.rectangle(foreground, (x1, y1), (x2, y2), color=(0, 0, 255))
            cv2.drawMarker(foreground, SilhouetteExtractor.__to_int_tuple(centroid), (0, 0, 255), cv2.MARKER_CROSS, 11)
            bbox_w_h_ratio = tracked_object_stats[cv2.CC_STAT_WIDTH] / tracked_object_stats[cv2.CC_STAT_HEIGHT]
            cv2.putText(foreground, "BBOX w/h ratio: {0:.4f}".format(bbox_w_h_ratio), (x1, y1 - 18),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
        if SilhouetteExtractor.SHOW_INTERSECTS:
            if self.intersects_frame_boundary(x1, x2, y1, y2):
                cv2.putText(foreground, "FRAME BORDER INTERSECT DETECTED", (0, 54), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255))
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3:
        im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.1, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None:
        raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show:  # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: bib-tagger    Author: KateRita
def find_contours(image):
  #return cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE);
  #return cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
  return cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE);
Project: skastic    Author: mypalmike
def find_contours(self):
    _, contours, _ = cv2.findContours(
        self.img_contour,
        cv2.RETR_LIST,
        cv2.CHAIN_APPROX_SIMPLE
    )
    return contours
Project: StormCV2017    Author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: StormCV2017    Author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: retinal-exudates-detection    Author: getsanjeev
def extract_bv(image):          
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # noise removal: drop very small contours based on their area
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and other unwanted chunks, on the assumption that, unlike
    # blood vessels, they are not elongated line-like structures and fall within a certain area range
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: retinal-exudates-detection    Author: getsanjeev
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # noise removal: drop very small contours based on their area
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and other unwanted chunks, on the assumption that, unlike
    # blood vessels, they are not elongated line-like structures and fall within a certain area range
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: diddyborg    Author: piborg
def ProcessImage(self, image):
        global autoMode
        # Get the red section of the image
        image = cv2.medianBlur(image, 5)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
        red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
        # Find the contours
        contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Go through each contour
        foundArea = -1
        foundX = -1
        foundY = -1
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            cx = x + (w / 2)
            cy = y + (h / 2)
            area = w * h
            if foundArea < area:
                foundArea = area
                foundX = cx
                foundY = cy
        if foundArea > 0:
            ball = [foundX, foundY, foundArea]
        else:
            ball = None
        # Set drives or report ball status
        if autoMode:
            self.SetSpeedFromBall(ball)
        else:
            if ball:
                print 'Ball at %d,%d (%d)' % (foundX, foundY, foundArea)
            else:
                print 'No ball'

    # Set the motor speed from the ball position
Project: diddyborg    Author: piborg
def ProcessImage(self, image):
        # Get the red section of the image
        image = cv2.medianBlur(image, 5)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
        red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
        # Find the contours
        contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Go through each contour
        foundArea = -1
        foundX = -1
        foundY = -1
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            cx = x + (w / 2)
            cy = y + (h / 2)
            area = w * h
            if foundArea < area:
                foundArea = area
                foundX = cx
                foundY = cy
        if foundArea > 0:
            ball = [foundX, foundY, foundArea]
        else:
            ball = None
        # Set drives or report ball status
        self.SetSpeedFromBall(ball)

    # Set the motor speed from the ball position
Project: vision-code    Author: FIRST-Team-1699
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: Stylus    Author: amaneureka
def find_samples_bounding_rect(path):

    min_w = 0
    min_h = 0

    print ('finding bounding box:')
    bar = progressbar.ProgressBar(maxval=num_classes*num_samples,
        widgets=[
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.ETA(), ') ',
    ])
    bar.start()
    counter = 0

    for i in range(1, num_classes + 1):
        for j in range(1, num_samples + 1):

            filename = '{0}/Sample{1:03d}/img{1:03d}-{2:03d}.png'.format(path, i, j)

            # opencv read -> Gray Image -> Bounding Rect
            im = cv2.imread(filename)
            imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            imgray = cv2.bitwise_not(imgray)
            _, contours, _ = cv2.findContours(imgray, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
            _, _, w, h = cv2.boundingRect(contours[len(contours) - 1])

            # find maximum resolution
            min_w = max(min_w, w)
            min_h = max(min_h, h)

            # update progress bar
            counter = counter + 1
            bar.update(counter)

    bar.finish()
    return min_w, min_h
Project: Stylus    Author: amaneureka
def normalize_pygame_image(screen, topleft, width, height):
    data = pygame.image.tostring(screen, 'RGB')
    img = np.fromstring(data, dtype=np.uint8)
    img.shape = (screen.get_height(), screen.get_width(), -1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img[topleft[1]:, topleft[0]:]
    _, img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    x, y, w, h = cv2.boundingRect(contours[len(contours) - 1])
    img = img[y:y+h, x:x+w]
    factor = min(float(width/w), float(height/h))
    img = cv2.resize(img, None, fx=factor, fy=factor, interpolation = cv2.INTER_CUBIC)[:height, :width]
    return img
Project: CompetitionBot2017    Author: Seamonsters-2605
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: CompetitionBot2017    Author: Seamonsters-2605
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: R-CNN_LIGHT    Author: YeongHyeon
def contouring(binary_img=None):

    # return two values: contours, hierarchy
    # cv2.RETR_EXTERNAL

    return cv2.findContours(binary_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
Project: GRIP-Raspberry-Pi-3    Author: FRC5254
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: GRIP-Raspberry-Pi-3    Author: FRC5254
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: GRIP-Raspberry-Pi-3    Author: FRC5254
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: 2017-Vision    Author: RoboticsTeam4904
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: VisionTest    Author: SamCB
def retrieve_subsections(img):
    """Yield coordinates of boxes that contain interesting images

    Yields x, y coordinates; width and height as a tuple

    An example use:

        images = []
        for x, y, w, h in retrieve_subsections(img):
            images.append(img[y:y+h, x:x+w])
    """
    if len(img.shape) == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    results = cv2.cornerHarris(gray, 9, 3, 0.04)
    # Normalise harris points between 0 and 1
    hmin = results.min()
    hmax = results.max()
    results = (results - hmin)/(hmax-hmin)

    # Blur so we retrieve the surrounding details
    results = cv2.GaussianBlur(results, (31, 31), 5)

    # Create a threshold collecting the most interesting areas
    threshold = np.zeros(results.shape, dtype=np.uint8)
    threshold[results>results.mean() * 1.01] = 255

    # Find the bounding box of each threshold, and yield the image
    contour_response = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    # Different versions of cv2 return a different number of attributes
    if len(contour_response) == 3:
        contours = contour_response[1]
    else:
        contours = contour_response[0]

    for contour in contours:
        # x, y, w, h
        yield cv2.boundingRect(contour)
Project: Vec-Lib    Author: vladan-jovicic
def detect_contours(self):
        blurred = cv2.GaussianBlur(self.src, (self.kernel_size, self.kernel_size), self.sigma)

        # apply canny detector
        detected_edges = cv2.Canny(blurred, self.threshold, self.threshold * self.ratio, apertureSize=self.apertureSize, L2gradient=True)

        if self.use_dilate:
            kernel = np.ones((3, 3), np.uint8)
            detected_edges = cv2.morphologyEx(detected_edges, cv2.MORPH_CLOSE, kernel)

        self.contours_img, self.simple_contours, self.hierarchy = cv2.findContours(detected_edges.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
        # pdb.gimp_message(self.hierarchy)
        _, self.full_contours, _ = cv2.findContours(detected_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
Project: Deep-Leafsnap    Author: sujithv28
def load_image_and_preprocess(path, segmented_path):
    # Open image from disk
    image = misc.imread(path.strip())
    segmented_image = misc.imread(segmented_path.strip())

    img = segmented_image
    h, w = img.shape[:2]
    height, width = h, w
    # print('Height: {:3d}, Width: {:4d}\n'.format(height,width))
    ret, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Calculate bounding rectangles for each contour.
    rects = [cv2.boundingRect(cnt) for cnt in contours]
    if rects == []:
        # No contours in image (pure black) just keep entire image
        top_y = 0
        bottom_y = height - 120
        left_x = 0
        right_x = width - 120
    else:
        # Calculate the combined bounding rectangle points.
        top_y = max(0, min([y for (x, y, w, h) in rects]) - 40)
        bottom_y = min(height, max([y + h for (x, y, w, h) in rects]) + 80)
        left_x = max(0, min([x for (x, y, w, h) in rects]) - 40)
        right_x = min(width - 1, max([x + w for (x, y, w, h) in rects]) + 80)

        # Prevent Tile Out of Bounds Issues
        if top_y == bottom_y:
            bottom_y = min(height, bottom_y + 1)
            top_y = max(0, top_y - 1)
        if left_x == right_x:
            right_x = min(width, right_x + 1)
            left_x = max(0, left_x - 1)

        # Landscape Image
        if width >= height:
            if left_x >= 450:
                right_x = width - 150
                left_x = 0
            if top_y >= 350 or bottom_y <= 200:
                top_y = 0
                bottom_y = height - 100
        # Portrait
        else:
            if left_x >= 350:
                right_x = width - 100
                left_x = 0
            if top_y >= 450 or bottom_y <= 250:
                top_y = 0
                bottom_y = height - 150

    # Use the rectangle to crop on original image
    img = image[top_y:bottom_y, left_x:right_x]
    img = misc.imresize(img, (224, 224))
    return img
Project: pynephoscope    Author: neXyon
def detect(self, image):
        floatimage = cv2.cvtColor(np.float32(image), cv2.COLOR_BGR2GRAY) / 255

        if self.gaussian is None or self.gaussian.shape[0] != Configuration.log_kernel_size:
            self.gaussian = cv2.getGaussianKernel(Configuration.log_kernel_size, -1, cv2.CV_32F)

        gaussian_filtered = cv2.sepFilter2D(floatimage, cv2.CV_32F, self.gaussian, self.gaussian)

        # LoG
        filtered = cv2.Laplacian(gaussian_filtered, cv2.CV_32F, ksize=Configuration.log_block_size)

        # DoG
        #gaussian2 = cv2.getGaussianKernel(Configuration.log_block_size, -1, cv2.CV_32F)
        #gaussian_filtered2 = cv2.sepFilter2D(floatimage, cv2.CV_32F, gaussian2, gaussian2)
        #filtered = gaussian_filtered - gaussian_filtered2

        mi = np.min(filtered)
        ma = np.max(filtered)

        if mi - ma != 0:
            filtered = 1 - (filtered - mi) / (ma - mi)

        _, thresholded = cv2.threshold(filtered, Configuration.log_threshold, 1.0, cv2.THRESH_BINARY)
        self.debug = thresholded
        thresholded = np.uint8(thresholded)

        contours = None

        if int(cv2.__version__.split('.')[0]) == 2:
            contours, _ = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        candidates = []

        for i in range(len(contours)):
            rect = cv2.boundingRect(contours[i])
            v1 = rect[0:2]
            v2 = np.add(rect[0:2], rect[2:4])
            if rect[2] < Configuration.log_max_rect_size and rect[3] < Configuration.log_max_rect_size:
                roi = floatimage[v1[1]:v2[1], v1[0]:v2[0]]
                _, _, _, maxLoc = cv2.minMaxLoc(roi)
                maxLoc = np.add(maxLoc, v1)

                candidates.append(maxLoc)

        self.candidates = candidates

        return candidates
Project: Raspberry-Pi-for-Python-Programmers-Cookbook-Second-Edition    Author: PacktPublishing
def process_image(raw_image,control):
        global threshold
        text=[]
        images=[]

        #Switch color threshold
        if control == ord("c"):
          threshold=(threshold+1)%len(THRESH_LOW)

        #Keep a copy of the raw image
        text.append("Raw Image %s"%THRESH_TXT[threshold])
        images.append(raw_image)
        #Blur the raw image
        text.append("with Blur...%s"%THRESH_TXT[threshold])
        images.append(cv2.blur(raw_image, BLUR))

        lower = np.array(THRESH_LOW[threshold],dtype="uint8")
        upper = np.array(THRESH_HI[threshold], dtype="uint8")

        text.append("with Threshold...%s"%THRESH_TXT[threshold])
        images.append(cv2.inRange(images[-1], lower, upper))


        # find contours in the threshold image
        text.append("with Contours...%s"%THRESH_TXT[threshold])
        images.append(images[-1].copy())
        image, contours, hierarchy = cv2.findContours(images[-1],
                                                      cv2.RETR_LIST,
                                                      cv2.CHAIN_APPROX_SIMPLE)

        # finding contour with maximum area and store it as best_cnt
        max_area = 0
        best_cnt = 1
        for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > max_area:
                        max_area = area
                        best_cnt = cnt

        # finding centroids of best_cnt and draw a circle there
        M = cv2.moments(best_cnt)
        cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])

        if max_area>0:
          cv2.circle(raw_image,(cx,cy),8,(THRESH_HI[threshold]),-1)
          cv2.circle(raw_image,(cx,cy),4,(THRESH_LOW[threshold]),-1)

        return(images,text)
#End