Python cv2 module: CHAIN_APPROX_NONE code examples

We extracted the following 31 code examples from open-source Python projects to illustrate how to use cv2.CHAIN_APPROX_NONE.
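
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below; the synthetic image and sizes are purely illustrative). It contrasts cv2.CHAIN_APPROX_NONE with cv2.CHAIN_APPROX_SIMPLE and unpacks the cv2.findContours return value in a way that works across OpenCV 2.x, 3.x, and 4.x, since the examples below assume different OpenCV versions.

import cv2
import numpy as np

# Synthetic test image: a filled white rectangle on a black background.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (50, 50), (150, 150), 255, -1)

def find_contours_compat(binary, mode, method):
    """Unpack cv2.findContours across OpenCV 2.x, 3.x, and 4.x return signatures."""
    result = cv2.findContours(binary.copy(), mode, method)
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy).
    return result[-2]

# CHAIN_APPROX_NONE stores every boundary point of each contour ...
cnts_none = find_contours_compat(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# ... while CHAIN_APPROX_SIMPLE compresses straight segments down to their end points.
cnts_simple = find_contours_compat(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

print(len(cnts_none[0]), "points with CHAIN_APPROX_NONE")      # on the order of the rectangle's perimeter
print(len(cnts_simple[0]), "points with CHAIN_APPROX_SIMPLE")  # just the 4 corner points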

Project: yonkoma2data    Author: esuji5    | Project source | File source
def homography(self, img, outdir_name=''):
        orig = img
        # grayscale, blur, and Canny edge detection
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # extract contours from the edge image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # approximate the contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def diff_rect(img1, img2, pos=None):
    """find counters include pos in differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest differing area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Project: CE264-Computer_Vision    Author: RobinCPC    | Project source | File source
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierarchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
Project: ATX    Author: NetEaseGame    | Project source | File source
def diff_rect(img1, img2, pos=None):
    """find counters include pos in differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest differing area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Project: DeepFryBot    Author: asdvek    | Project source | File source
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords


# find list of eye coordinates in image
Project: cervix-roi-segmentation-by-unet    Author: scottykwok    | Project source | File source
def cropCircle(img, resize=None):
    if resize:
        if (img.shape[0] > img.shape[1]):
            tile_size = (int(img.shape[1] * resize / img.shape[0]), resize)
        else:
            tile_size = (resize, int(img.shape[0] * resize / img.shape[1]))
        img = cv2.resize(img, dsize=tile_size, interpolation=cv2.INTER_CUBIC)
    else:
        tile_size = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY);
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]), min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff, (min(rect[1], rect[3]), min(rect[0], rect[2])), (max(rect[1], rect[3]), max(rect[0], rect[2])), 3,
                  2)

    return [img_crop, rectangle, tile_size]
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project source | File source
def logoDetect(img,imgo):
    '''Detect and locate the vehicle logo region above the license plate.'''
    imglogo=imgo.copy()
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
    #img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
    img=cv2.Canny(img,100,200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2,iterations = 1)
    img = cv2.erode(img, element1, iterations = 3)
    img = cv2.dilate(img, element2,iterations = 3)

    # find contours
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema=0
    result=[]
    for con in contours:
        x,y,w,h=cv2.boundingRect(con)
        area=w*h
        ratio=max(w/h,h/w)
        if area>300 and area<20000 and ratio<2:
            if area>tema:
                tema=area
                result=[x,y,w,h]
                ratio2=ratio
    # map the detected region back onto the original image using the plate position, then crop the logo
    logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
    logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
    cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
    cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
    print(tema, ratio2, result)
    logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg',logo2)

    return img
Project: esys-pbi    Author: fsxfreak    | Project source | File source
def find_concetric_circles(gray_img,min_ring_count=3, visual_debug=False):

    # get threshold image used to get crisp-clean edges using blur to remove small features
    edges = cv2.adaptiveThreshold(cv2.blur(gray_img,(3,3)), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 11)
    _, contours, hierarchy = cv2.findContours(edges,
                                    mode=cv2.RETR_TREE,
                                    method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
    if visual_debug is not False:
        cv2.drawContours(visual_debug,contours,-1,(200,0,0))
    if contours is None or hierarchy is None:
        return []
    clusters = get_nested_clusters(contours,hierarchy[0],min_nested_count=min_ring_count)
    concentric_cirlce_clusters = []

    #speed up code by caching computed ellipses
    ellipses = {}

    # for each cluster, fit ellipses and cull members that don't have a good ellipse fit
    for cluster in clusters:
        if visual_debug is not False:
            cv2.drawContours(visual_debug, [contours[i] for i in cluster],-1, (0,0,255))
        candidate_ellipses = []
        for i in cluster:
            c = contours[i]
            if len(c)>5:
                if not i in ellipses:
                    e = cv2.fitEllipse(c)
                    fit = max(dist_pts_ellipse(e,c))
                    ellipses[i] = e,fit
                else:
                    e,fit = ellipses[i]
                a,b = e[1][0]/2.,e[1][1]/2.
                if fit<max(2,max(e[1])/20):
                    candidate_ellipses.append(e)
                    if visual_debug is not False:
                        cv2.ellipse(visual_debug, e, (0,255,0),1)

        if candidate_ellipses:
            cluster_center = np.mean(np.array([e[0] for e in candidate_ellipses]),axis=0)
            candidate_ellipses = [e for e in candidate_ellipses if np.linalg.norm(e[0]-cluster_center)<max(3,min(e[1])/20) ]
            if len(candidate_ellipses) >= min_ring_count:
                concentric_cirlce_clusters.append(candidate_ellipses)
                if visual_debug is not False:
                    cv2.ellipse(visual_debug, candidate_ellipses[-1], (0,255,255),4)

    # return clusters sorted by size of the outermost circle, biggest first.
    return sorted(concentric_cirlce_clusters,key=lambda e:-max(e[-1][1]))
Project: CE264-Computer_Vision    Author: RobinCPC    | Project source | File source
def single_finger_check(self, cnt):
        # use a single-finger image to check whether the current frame has a single finger
        grey_fin1 = cv2.cvtColor(self.fin1, cv2.COLOR_BGR2GRAY)
        _, thresh_fin1 = cv2.threshold(grey_fin1, 127, 255, 0)
        contour_fin1, hierarchy = cv2.findContours(thresh_fin1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt1 = contour_fin1[0]
        ret1 = cv2.matchShapes(cnt, cnt1, 1, 0)

        grey_fin2 = cv2.cvtColor(self.fin2, cv2.COLOR_BGR2GRAY)
        _, thresh_fin2 = cv2.threshold(grey_fin2, 127, 255, 0)
        contour_fin2, hierarchy = cv2.findContours(thresh_fin2.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt2 = contour_fin2[0]
        ret2 = cv2.matchShapes(cnt, cnt2, 1, 0)

        grey_fin3 = cv2.cvtColor(self.fin3, cv2.COLOR_BGR2GRAY)
        _, thresh_fin3 = cv2.threshold(grey_fin3, 127, 255, 0)
        contour_fin3, hierarchy = cv2.findContours(thresh_fin3.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cnt3 = contour_fin3[0]
        ret3 = cv2.matchShapes(cnt, cnt3, 1, 0)
        reta = (ret1 + ret2 + ret3)/3
        if reta <= 0.3:
            return 5        # set as one-finger module
        else:
            return 0        # not detect, still 0


# Use PyAutoGUI to control mouse event
Project: bib-tagger    Author: KateRita    | Project source | File source
def find_contours(image):
  #return cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE);
  #return cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
  return cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE);
Project: slide_captcha_cracker    Author: chxj1992    | Project source | File source
def get_operator(path, url=False, expand=False):
    if url:
        req = requests.get(path)
        arr = np.asarray(bytearray(req.content), dtype=np.uint8)
        shape = cv2.resize(cv2.imdecode(arr, -1), (69, 69))
    else:
        shape = cv2.resize(cv2.imread('shape.png'), (69, 69))

    shape_gray = cv2.cvtColor(shape, cv2.COLOR_BGR2GRAY)

    _, shape_binary = cv2.threshold(shape_gray, 127, 255, cv2.THRESH_BINARY)

    _, contours, hierarchy = cv2.findContours(shape_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contour = contours[0]

    operator = np.zeros((69, 69))

    for point in contour:
        operator[point[0][0]][point[0][1]] = 1
        if expand:
            if point[0][0] > 0:
                operator[point[0][0] - 1][point[0][1]] = 1
            if point[0][0] < 68:
                operator[point[0][0] + 1][point[0][1]] = 1
            if point[0][1] > 0:
                operator[point[0][0]][point[0][1] - 1] = 1
            if point[0][1] < 68:
                operator[point[0][0]][point[0][1] + 1] = 1

    return operator
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def process_image(self, cv_image, header, tag):
        """ process the image """
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # mask for color range
        if self.color_range:
            mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
            count = cv2.countNonZero(mask)
            if count:
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.dilate(mask, kernel, iterations=2)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

                for i, c in enumerate(contours):
                    x, y, w, h = cv2.boundingRect(c)
                    if self.prefix is not None:
                        name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                           tag,
                                                           header.seq, i)
                        print(name)
                        roi = cv_image[y:y+h, x:x+w]
                        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                        gray = cv2.equalizeHist(gray)
                        cv2.imwrite(name, gray)

                for c in contours:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
            elif self.prefix is not None:
                name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                        header.seq, )
                cv2.imwrite(name, cv_image)

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
Project: Super_TF    Author: Dhruv-Mohan    | Project source | File source
def getweight(self, mask_mat=None):
        #gray_mask = cv2.cvtColor(mask_mat, cv2.COLOR_BGR2GRAY)
        gray_mask=mask_mat
        ret, bin_mask = cv2.threshold(gray_mask,1,1,0)
        _, contours, _ = cv2.findContours(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        weights = np.zeros_like(bin_mask, dtype=np.float)

        weights = cv2.drawContours(weights, contours, -1, (1), 5)
        weights = cv2.GaussianBlur(weights, (41,41), 1000)
        weights = np.multiply(weights,10)+0.6
        return weights
Project: Artificial-Potential-Field    Author: vampcoder    | Project source | File source
def find_robot(im):
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]

    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    #show_image(im)
    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source | File source
def find_robot(frame):
    im = copy.copy(frame)
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]

    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    cv2.imshow('img', im)
    k = cv2.waitKey(0)
    cv2.imwrite('robot.jpg', im)
    #show_image(im)
    return (int(x), int(y))
Project: page_dewarp    Author: mzucker    | Project source | File source
def get_contours(name, small, pagemask, masktype):

    mask = get_mask(name, small, pagemask, masktype)

    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_NONE)

    contours_out = []

    for contour in contours:

        rect = cv2.boundingRect(contour)
        xmin, ymin, width, height = rect

        if (width < TEXT_MIN_WIDTH or
                height < TEXT_MIN_HEIGHT or
                width < TEXT_MIN_ASPECT*height):
            continue

        tight_mask = make_tight_mask(contour, xmin, ymin, width, height)

        if tight_mask.sum(axis=0).max() > TEXT_MAX_THICKNESS:
            continue

        contours_out.append(ContourInfo(contour, rect, tight_mask))

    if DEBUG_LEVEL >= 2:
        visualize_contours(name, small, contours_out)

    return contours_out
Project: spockpy    Author: achillesrasquinha    | Project source | File source
def _get_contours(array):
    major = _get_opencv_version()[0]

    if major == 3:
        _, contours, _ = cv2.findContours(array, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    else:
        _, contours    = cv2.findContours(array, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    return contours
Project: image_text_reader    Author: yardstick17    | Project source | File source
def contour_plot_on_text_in_image(inv_img):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 2))
    dilated = cv2.dilate(inv_img, kernel, iterations=7)  # dilate
    _, contours, hierarchy = cv2.findContours(
        dilated,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_NONE)  # get contours
    return contours
Project: Sleep-Early    Author: AliNL    | Project source | File source
def diff_rect(img1, img2, pos=None):
    """find counters include pos in differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest differing area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Project: perception    Author: BerkeleyAutomation    | Project source | File source
def find_contours(self, min_area=0.0, max_area=np.inf):
        """Returns a list of connected components with an area between
        min_area and max_area.
        Parameters
        ----------
        min_area : float
            The minimum area for a contour
        max_area : float
            The maximum area for a contour
        Returns
        -------
        :obj:`list` of :obj:`Contour`
            A list of resulting contours
        """
        # get all contours (connected components) from the binary image
        _, contours, hierarchy = cv2.findContours(
            self.data.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        num_contours = len(contours)
        kept_contours = []

        # find which contours need to be pruned
        for i in range(num_contours):
            area = cv2.contourArea(contours[i])
            logging.debug('Contour %d area: %.3f' % (len(kept_contours), area))
            if area > min_area and area < max_area:
                boundary_px = contours[i].squeeze()
                boundary_px_ij_swapped = np.zeros(boundary_px.shape)
                boundary_px_ij_swapped[:, 0] = boundary_px[:, 1]
                boundary_px_ij_swapped[:, 1] = boundary_px[:, 0]
                kept_contours.append(
                    Contour(
                        boundary_px_ij_swapped,
                        area=area,
                        frame=self._frame))

        return kept_contours
Project: VisionTest    Author: SamCB    | Project source | File source
def retrieve_subsections(img):
    """Yield coordinates of boxes that contain interesting images

    Yields x, y coordinates; width and height as a tuple

    An example use:

        images = []
        for x, y, w, h in retrieve_subsections(img):
            images.append(img[y:y+h, x:x+w])
    """
    if len(img.shape) == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    results = cv2.cornerHarris(gray, 9, 3, 0.04)
    # Normalise harris points between 0 and 1
    hmin = results.min()
    hmax = results.max()
    results = (results - hmin)/(hmax-hmin)

    # Blur so we retrieve the surrounding details
    results = cv2.GaussianBlur(results, (31, 31), 5)

    # Create a threshold collecting the most interesting areas
    threshold = np.zeros(results.shape, dtype=np.uint8)
    threshold[results>results.mean() * 1.01] = 255

    # Find the bounding box of each threshold, and yield the image
    contour_response = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    # Different versions of cv2 return a different number of attributes
    if len(contour_response) == 3:
        contours = contour_response[1]
    else:
        contours = contour_response[0]

    for contour in contours:
        # x, y, w, h
        yield cv2.boundingRect(contour)
Project: Vec-Lib    Author: vladan-jovicic    | Project source | File source
def detect_contours(self):
        blurred = cv2.GaussianBlur(self.src, (self.kernel_size, self.kernel_size), self.sigma)

        # apply canny detector
        detected_edges = cv2.Canny(blurred, self.threshold, self.threshold * self.ratio, apertureSize=self.apertureSize, L2gradient=True)

        if self.use_dilate:
            kernel = np.ones((3, 3), np.uint8)
            detected_edges = cv2.morphologyEx(detected_edges, cv2.MORPH_CLOSE, kernel)

        self.contours_img, self.simple_contours, self.hierarchy = cv2.findContours(detected_edges.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
        # pdb.gimp_message(self.hierarchy)
        _, self.full_contours, _ = cv2.findContours(detected_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
Project: Automatic-Plate-Number-Recognition-APNR    Author: kagan94    | Project source | File source
def find_contours(img):
    '''
    :param img: (numpy array)
    :return: all possible rectangles (contours)
    '''
    img_blurred = cv2.GaussianBlur(img, (5, 5), 1)  # remove noise
    img_gray = cv2.cvtColor(img_blurred, cv2.COLOR_BGR2GRAY)  # greyscale image
    # cv2.imshow('', img_gray)
    # cv2.waitKey(0)

    # Apply Sobel filter to find the vertical edges
    # Find vertical lines. Car plates have high density of vertical lines
    img_sobel_x = cv2.Sobel(img_gray, cv2.CV_8UC1, dx=1, dy=0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    # cv2.imshow('img_sobel', img_sobel_x)

    # Apply optimal threshold by using Otsu's algorithm
    retval, img_threshold = cv2.threshold(img_sobel_x, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # cv2.imshow('s', img_threshold)
    # cv2.waitKey(0)

    # TODO: Try to apply AdaptiveThresh
    # Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
    # gaus_threshold = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 115, 1)
    # cv2.imshow('or', img)
    # cv2.imshow('gaus', gaus_threshold)
    # cv2.waitKey(0)

    # Define a structural element as a rectangle of size 17x3 (we'll use it during the morphological cleaning)
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))

    # And use this structural element in a close morphological operation
    morph_img_threshold = deepcopy(img_threshold)
    cv2.morphologyEx(src=img_threshold, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
    # cv2.dilate(img_threshold, kernel=np.ones((1,1), np.uint8), dst=img_threshold, iterations=1)
    # cv2.imshow('Normal Threshold', img_threshold)
    # cv2.imshow('Morphological Threshold based on rect. mask', morph_img_threshold)
    # cv2.waitKey(0)

    # Find contours that contain possible plates (in hierarchical relationship)
    contours, hierarchy = cv2.findContours(morph_img_threshold,
                                           mode=cv2.RETR_EXTERNAL,  # retrieve the external contours
                                           method=cv2.CHAIN_APPROX_NONE)  # all pixels of each contour

    plot_intermediate_steps = False
    if plot_intermediate_steps:
        plot(plt, 321, img, "Original image")
        plot(plt, 322, img_blurred, "Blurred image")
        plot(plt, 323, img_gray, "Grayscale image", cmap='gray')
        plot(plt, 324, img_sobel_x, "Sobel")
        plot(plt, 325, img_threshold, "Threshold image")
        # plot(plt, 326, morph_img_threshold, "After Morphological filter")
        plt.tight_layout()
        plt.show()

    return contours
Project: Artificial-Potential-Field    Author: vampcoder    | Project source | File source
def find_goal(frame):
    # converting to HSV

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    #cv2.imshow('image', frame)
    #k = cv2.waitKey(0)

    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source | File source
def find_goal(img):
    # converting to HSV
    frame = copy.copy(img)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    cv2.imshow('image', frame)
    cv2.imwrite('goal.jpg', frame)
    k = cv2.waitKey(0)

    return (int(x), int(y))
Project: Artificial-Potential-Field    Author: vampcoder    | Project source | File source
def find_goal(frame):
    # converting to HSV

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    #show_image(hsv)

    lower_blue = np.array([113, 40, 29])
    upper_blue = np.array([123, 180, 255])

    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    #show_image(mask)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    #show_image(result)
    blur = cv2.blur(result, (5, 5))

    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)

    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    # th3 = cv2.adaptiveThreshold(bw2,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
    # cv2.THRESH_BINARY,11,2)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)

    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # print len(contours)
    # if(len(contours) > 5):
    #    continue
    cnt = np.array([])
    for i in range(len(contours)):
        if (perimeter < cv2.contourArea(contours[i])):
            perimeter = cv2.contourArea(contours[i])
            j = i;
            cnt = contours[j]
    if (len(cnt) == 0):
        return (-1, -1)
    cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
    x = 0
    y = 0
    #print 'find goal'
    #print len(cnt), j
    #print cnt
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x/len(cnt)
    y = y/len(cnt)
    #print x, y
    x = int(x)
    y = int(y)
    cv2.circle(frame, (x, y), 5, (255, 0, 255), -1)

    #cv2.imshow('image', frame)
    #k = cv2.waitKey(0)

    return (int(x), int(y))
Project: vision-code    Author: FIRST-Team-1699    | Project source | File source
def main():
    """im = cv2.imread('307.jpg')
    imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 85, 120, 0)
    """

    image = cv2.imread('307.jpg')

    lower = (120, 120, 0)
    upper = (190, 200, 150)

    # create NumPy arrays from the boundaries
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")

    # find the colors within the specified boundaries and apply
    # the mask
    mask = cv2.inRange(image, lower, upper)


    output = cv2.bitwise_and(image, image, mask=mask)

    imgray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imgray, 85, 120, 0)

    # Detect contours on the thresholded image
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # Copy over the original image to separate variables
    img1 = image.copy()

    new_contours = []

    min_area = float(input("Enter the minimum area: "))

    for x in contours:
        if (cv2.contourArea(x) < min_area):
            pass
        else:
            new_contours.append(x)
        pass

    # Draw the filtered contours onto the image copy
    cv2.drawContours(img1, new_contours, -1, (2, 21, 200), 3)

    print(cv2.contourArea(new_contours[0]))

    # Now show the image
    print(new_contours)
    #cv2.imwrite('test-process' + str(int(min_area)) + '.jpg', img1)
    cv2.imshow('Output', img1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: ghetto_omr    Author: pohzhiee    | Project source | File source
def contouring(img,match_coeff):
    #Defining coefficients
    #----------------------------------
    #Max value of contour shape matching coefficient
    match_coeff = 0.1
    #max contour area
    max_cont_area = 100
    #----------------------------------
    # find contours
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # remove contours whose area is below the predefined threshold
    c_counter = 0
    for c in contours:

        A = cv2.contourArea(c)

        if A<max_cont_area:
            contours=np.delete(contours,c_counter,0)
            c_counter=c_counter-1
        c_counter=c_counter+1

    #length of truncated contour array
    clen=c_counter

    #create match_array [dimension = len x len] with 0s
    match_array=np.zeros((clen,clen),np.uint8)

    #loop over the contours and compare two by two
    icounter = 0
    for i in contours:
        jcounter = 0

        for j in contours:
        # if the shape-match value is below match_coeff, regard as a match (TRUE)
            ret=cv2.matchShapes(i,j,1,0.0)
            if ret<match_coeff:
                match_array[icounter,jcounter]=1
            else:
                match_array[icounter,jcounter]=0
            jcounter=jcounter+1
        icounter=icounter+1


    #sum each row of the array (for TRUEs and FALSEs)
    sum_array=np.sum(match_array,axis=1,dtype=np.uint16)
    #finding mean of the comparison value
    sum_all=np.sum(sum_array,axis=0,dtype=np.uint16)
    ave_sim_val=sum_all/clen
    #Assumption: there is a lot of 1s
    return contours,sum_array,ave_sim_val
Project: pytesseractID    Author: iChenwin    | Project source | File source
def detectTextRects(image, imageScale):
    # letterBoxes
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    threshold = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)[1]

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (130, 20))
    result = cv2.dilate((255 - threshold), kernel)

    # find the contours of the dilated text regions
    contours = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]

    maxValue = 200 * imageScale
    minValue = 40 * imageScale

    boundRect = []
    for points in contours:
        appRect = cv2.boundingRect(points)  # x y w h

        if (appRect[3] > maxValue and appRect[2] > maxValue):
            continue

        if (appRect[3] < minValue or appRect[2] < minValue):
            continue
        appRect = list(appRect)
        appRect[2] += 60 * imageScale
        appRect[3] += 15 * imageScale
        appRect[0] -= 30 * imageScale
        appRect[1] -= 7.5 * imageScale
        boundRect.append(tuple(appRect))
    return boundRect


# call tesseract through a shell command to recognize the image text
# def image_to_string(img, cleanup=True, plus=''):
#     # if cleanup is True, remove the intermediate .txt file afterwards
#     # plus: extra arguments passed to tesseract
#     try:
#         subprocess.check_output('tesseract ' + img + ' ' + img + ' ' + plus, shell=True)  # write result to a .txt file
#     except subprocess.CalledProcessError as e:
#         return ""
#     text = ''
#     with open(img + '.txt', 'r') as f:
#         text = f.read().strip()
#     if cleanup:
#         os.remove(img + '.txt')
#     return text

# ?????
Project: pytesseractID    Author: iChenwin    | Project source | File source
def detectTextRects(image, imageScale):
    # letterBoxes
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    threshold = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)[1]

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (130, 20))
    result = cv2.dilate((255 - threshold), kernel)

    # find the contours of the dilated text regions
    contours = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]

    maxValue = 200 * imageScale
    minValue = 40 * imageScale

    boundRect = []
    for points in contours:
        appRect = cv2.boundingRect(points)  # x y w h

        if (appRect[3] > maxValue and appRect[2] > maxValue):
            continue

        if (appRect[3] < minValue or appRect[2] < minValue):
            continue
        appRect = list(appRect)
        appRect[2] += 60 * imageScale
        appRect[3] += 15 * imageScale
        appRect[0] -= 30 * imageScale
        appRect[1] -= 7.5 * imageScale
        boundRect.append(tuple(appRect))
    return boundRect


# call tesseract through a shell command to recognize the image text
# def image_to_string(img, cleanup=True, plus=''):
#     # if cleanup is True, remove the intermediate .txt file afterwards
#     # plus: extra arguments passed to tesseract
#     try:
#         subprocess.check_output('tesseract ' + img + ' ' + img + ' ' + plus, shell=True)  # write result to a .txt file
#     except subprocess.CalledProcessError as e:
#         return ""
#     text = ''
#     with open(img + '.txt', 'r') as f:
#         text = f.read().strip()
#     if cleanup:
#         os.remove(img + '.txt')
#     return text

# ?????
Project: TableSoccerCV    Author: StudentCV    | Project source | File source
def detect_ball_position(self, img_hsv):
        """
        Finds the ball in the image.

        The algorithm is based on the ball color and does not use edge
        recognition to find the ball. As long as the ball color differs from
        the other colors in the image, it works well and is a safe way to find
        the ball.
        First, the image is searched for pixels with a color similar to the
        ball color, creating a mask. The mask should contain a white point (the
        ball). To ensure that the ball is found, the contours of the mask are
        computed. If more than one element has contours, a simple
        circle-similarity measure is calculated.
        The element with the highest similarity to a circle is considered as
        the ball.
        :param img_hsv: HSV-image to find the ball on
        :return: None
        """
        # TODO: also include the expected ball size into the decision
        x_mean = []
        y_mean = []
        dist = []
        self.curr_ball_position = (0, 0)

        # Get the areas of the image, which have a similar color to the ball color
        lower_color = np.asarray(self.ball_color)
        upper_color = np.asarray(self.ball_color)
        lower_color = lower_color - [10, 50, 50]  # good values (for test video are 10,50,50)
        upper_color = upper_color + [10, 50, 50]  # good values (for test video are 10,50,50)
        lower_color[lower_color < 0] = 0
        lower_color[lower_color > 255] = 255
        upper_color[upper_color < 0] = 0
        upper_color[upper_color > 255] = 255
        mask = cv2.inRange(img_hsv, lower_color, upper_color)
        mask = self._smooth_ball_mask(mask)

        # Find contours in the mask, at the moment only one contour is expected
        im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # For every contour found, the center is calculated (by averaging the
        # points), and the circle comparison is done.
        element_ctr = 0
        for element in contours:
            element = element[:,0,:]
            x_mean.append(int(np.round(np.mean(element[:,0]))))
            y_mean.append(int(np.round(np.mean(element[:,1]))))
            element_ctr += 1

            dist.append(self._check_circle(element))

        if element_ctr <= 0 or min(dist) > self.ball_detection_threshold:
            # If there is nothing found or it does not look like a circle, it is
            # assumed that there is no ball in the image.
            self.curr_ball_position = (-1, -1)
            #print("No ball detected")  # TODO: give that message to the interface
        else:
            # Otherwise the element with the best similarity to a circle is chosen
            # to be considered as the ball.
            self.curr_ball_position = (x_mean[np.argmin(dist)], y_mean[np.argmin(dist)])

        self.__store_ball_position(self.curr_ball_position)