Python cv2 module: RETR_EXTERNAL code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.RETR_EXTERNAL.
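
Most of the examples below differ mainly in how they unpack the return value of cv2.findContours, which changed between OpenCV 2.x, 3.x, and 4.x. A minimal, version-agnostic sketch of a typical cv2.RETR_EXTERNAL call (the file name 'shapes.png' and the threshold value 127 are placeholders, not taken from any of the projects):

import cv2

# Placeholder input: any single-channel binary image will do.
img = cv2.imread('shapes.png', cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# cv2.findContours returns 2 values on OpenCV 2.x/4.x and 3 values on 3.x;
# the contour list is always the second-to-last element.
contours = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

# RETR_EXTERNAL retrieves only the outermost contour of each connected
# component; nested (child) contours are discarded.
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    print(x, y, w, h, cv2.contourArea(c))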

Project: pyku | Author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        return vertices
Project: APEX | Author: ymollard
def find_center(self, name, frame, mask, min_radius):
        if name not in self.pts:
            self.pts[name] = deque(maxlen=self.params['tracking']['buffer_size'])

        # find contours in the mask and initialize the current (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it to compute the minimum enclosing circle and centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))

            # only proceed if the radius meets a minimum size
            if radius > min_radius:
                # draw the circle and centroid on the frame, then update the list of tracked points
                cv2.circle(frame, center, int(radius), (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                self.pts[name].appendleft(center)
                smooth_points = 8
                return (int(np.mean([self.pts[name][i][0] for i in range(min(smooth_points, len(self.pts[name])))])),
                        int(np.mean([self.pts[name][i][1] for i in range(min(smooth_points, len(self.pts[name])))]))), radius
        return None, None
Project: PiStorms | Author: mindsensors
def findSquare(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find hexagon
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has at least four points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x,y,w,h = cv2.boundingRect(c)
                cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", image)
                #create the mask and remove rest of the background
                mask = np.zeros(image.shape[:2], dtype = "uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(image, image, mask = mask)
                #cv2.imshow("Masked",masked  )
                #crop the masked image to be compared to the reference image
                cropped = masked[y:y+h,x:x+w]
                #scale the image to a fixed size matching the reference image
                cropped = cv2.resize(cropped, (200,200), interpolation =cv2.INTER_AREA)

                return cropped
Project: pyku | Author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
Project: piwall-cvtools | Author: infinnovation
def cannyThresholding(self, contour_retrieval_mode = cv2.RETR_LIST):
        '''
        contour_retrieval_mode is passed through as second argument to cv2.findContours
        '''

        # Attempt to match edges found in blue, green or red channels : collect all
        channel = 0
        for gray in cv2.split(self.img):
            channel += 1
            print('channel %d ' % channel)
            title = self.tgen.next('channel-%d' % channel)
            if self.show: ImageViewer(gray).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
            found = {}
            for thrs in xrange(0, 255, 26):
                print('Using threshold %d' % thrs)
                if thrs == 0:
                    print('First step')
                    bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                    title = self.tgen.next('canny-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                    bin = cv2.dilate(bin, None)
                    title = self.tgen.next('canny-dilate-%d' % channel)
                    if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                else:
                    retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                    title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                    if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy = self.destroy, info = self.info, thumbnailfn = title)
                bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
                title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window = title, destroy = self.destroy, info = self.info, thumbnailfn = title)
                if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                    filteredContours = contours
                else:
                    filteredContours = []
                    h = hierarchy[0]
                    for component in zip(contours, h):
                        currentContour = component[0]
                        currentHierarchy = component[1]
                        if currentHierarchy[3] < 0:
                            # Found the outermost parent component
                            filteredContours.append(currentContour)
                    print('Contours filtered.   Input %d  Output %d' % (len(contours), len(filteredContours)))
                    time.sleep(5)
                for cnt in filteredContours:
                    cnt_len = cv2.arcLength(cnt, True)
                    cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                    cnt_len = len(cnt)
                    cnt_area = cv2.contourArea(cnt)
                    cnt_isConvex = cv2.isContourConvex(cnt)
                    if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max)  and cnt_isConvex:
                        cnt = cnt.reshape(-1, 2)
                        max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                        if max_cos < self.cos_limit :
                            sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                            self.squares.append(sq)
                        else:
                            #print('dropped a square with max_cos %f' % max_cos)
                            pass
                found[thrs] = len(self.squares)
                print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
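The hierarchy filter in the snippet above keeps only contours whose parent index (hierarchy[i][3]) is negative, i.e. the outermost contours; cv2.RETR_EXTERNAL retrieves exactly that set directly. A small self-contained sketch of the equivalence on a synthetic test image (the image and sizes are made up for illustration):

import cv2
import numpy as np

# Synthetic binary image: a filled square with a hole, so RETR_TREE
# reports both an outer contour and a nested (child) contour.
img = np.zeros((100, 100), np.uint8)
cv2.rectangle(img, (10, 10), (90, 90), 255, -1)
cv2.rectangle(img, (30, 30), (70, 70), 0, -1)

# RETR_TREE, then keep only contours with no parent (hierarchy[...][3] < 0).
# .copy() because older OpenCV versions modify the input image in place.
res = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = res[-2], res[-1]
outermost = [c for c, h in zip(contours, hierarchy[0]) if h[3] < 0]

# RETR_EXTERNAL returns the same outermost set in one call
external = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

assert len(outermost) == len(external) == 1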
Project: DeepFryBot | Author: asdvek
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords


# find list of eye coordinates in image
Project: Yugioh-bot | Author: will7200
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
Project: PyFRAP | Author: alexblaessle
def getContours(img,kernel=(10,10)):

    #Define kernel
    kernel = np.ones(kernel, np.uint8)

    #Open to erode small patches
    thresh = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

    #Close little holes
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,kernel, iterations=4)

    #Find contours
    #contours=skimsr.find_contours(thresh,0)

    thresh=thresh.astype('uint8')
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)

    areas=[]
    for c in contours:
        areas.append(cv2.contourArea(c))

    return contours,thresh,areas
Project: pycolor_detection | Author: parth1993
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, heirarchy = cv2.findContours(sobel_8u, \
                                                  cv2.RETR_EXTERNAL, \
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    level1 = []
    for i, tupl in enumerate(heirarchy[0]):

        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, \
                             [contour], 0, (0, 255, 0), \
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant];
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    mask = np.logical_not(mask)
    img[mask] = 0;

    return img
Project: srcsim2017 | Author: ZarjRobotics
def find_contours(mask, smooth_factor=0.005):
        """ Find the contours in a given mask """
        border = 5
        # Canny detection breaks down with the edge of the image
        my_mask = cv2.copyMakeBorder(mask, border, border, border, border,
                                     cv2.BORDER_CONSTANT, value=(0, 0, 0))

        my_mask = cv2.cvtColor(my_mask, cv2.COLOR_BGR2GRAY)

        if is_cv2():
            contours, _ = cv2.findContours(my_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(my_mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)

        # shift the contours back down
        for contour in contours:
            for pnt in contour:
                if pnt[0][1] > border:
                    pnt[0][1] = pnt[0][1] - border
                else:
                    pnt[0][1] = 0
                if pnt[0][0] > border:
                    pnt[0][0] = pnt[0][0] - border
                else:
                    pnt[0][0] = 0

        closed_contours = []
        for contour in contours:
            epsilon = smooth_factor*cv2.arcLength(contour, True)
            approx = cv2.approxPolyDP(contour, epsilon, True)
            area = cv2.contourArea(approx)
            # if they are too small they are not edges
            if area < 200:
                continue
            closed_contours.append(approx)

        return closed_contours
Project: vision | Author: SouthEugeneRoboticsTeam
def get_largest(im, n):
    # Find contours of the shape
    major = cv2.__version__.split('.')[0]
    if major == '3':
        _, contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, _ = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Cycle through contours and add area to array
    areas = []
    for c in contours:
        areas.append(cv2.contourArea(c))

    # Sort array of areas by size
    sorted_areas = sorted(zip(areas, contours), key=lambda x: x[0], reverse=True)

    if sorted_areas and len(sorted_areas) >= n:
        # Find nth largest using data[n-1][1]
        return sorted_areas[n - 1][1]
    else:
        return None
Project: osrmacro | Author: jjvilm
def findFishingIcon():
    #fish color
    low = np.array([93,119,84])
    high = np.array([121,255,255])
    mask, mm_x, mm_y = get_mini_map_mask(low, high)

    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        x += mm_x
        y += mm_y
        x2 = x + w
        y2 = y + h
        Mouse.randMove(x,y,x2,y2,1)
        run= 0
        RandTime.randTime(1,0,0,1,9,9)
        return 0
    return 1
Project: osrmacro | Author: jjvilm
def findBankIcon(self):
        # bank color
        low = np.array([26,160,176])
        high = np.array([27,244,228])
        mask, mm_x, mm_y = self.mini_map_mask(low, high)

        cv2.imshow('mask', mask)
        cv2.waitKey(0)

        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            x += 568 
            y += 36
            x2 = x + w 
            y2 = y + h 
            Mouse.randMove(x,y,x2,y2,1)
            run= 0 
            time.sleep(1)
            return
Project: document-layout-analysis | Author: rbaguila
def process_letter(thresh,output):  
    # assign the kernel size    
    kernel = np.ones((2,1), np.uint8) # vertical
    # use closing morph operation then erode to narrow the image    
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)
    # temp_img = cv2.erode(thresh,kernel,iterations=2)      
    letter_img = cv2.erode(temp_img,kernel,iterations=1)

    # find contours 
    (contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    # loop in all the contour areas
    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   


#processing letter by letter boxing
Project: document-layout-analysis | Author: rbaguila
def process_word(thresh,output):    
    # assign 2 rectangle kernel size 1 vertical and the other will be horizontal    
    kernel = np.ones((2,1), np.uint8)
    kernel2 = np.ones((1,4), np.uint8)
    # use closing morph operation but fewer iterations than the letter then erode to narrow the image   
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    word_img = cv2.dilate(temp_img,kernel2,iterations=1)

    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing line by line boxing
Project: document-layout-analysis | Author: rbaguila
def process_line(thresh,output):    
    # assign a rectangle kernel size    1 vertical and the other will be horizontal
    kernel = np.ones((1,5), np.uint8)
    kernel2 = np.ones((2,4), np.uint8)  
    # use closing morph operation but fewer iterations than the letter then erode to narrow the image   
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel2,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    line_img = cv2.dilate(temp_img,kernel,iterations=5)

    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing par by par boxing
Project: Vision2016 | Author: Team3309
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, a new binary image to test against,
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
Project: piwall-cvtools | Author: infinnovation
def find(self, mode):
        self.mode = mode
        if mode == 1:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding'
            self.gaussianBlur()
            self.cannyThresholding()
        elif mode == 2:
            # Check Gimp Hints
            self.gimpMarkup()
            #self.cannyThresholding()
            self.modeDesc = 'Run gimpMarkup'
        elif mode == 3:
            # Massively mask red as a precursor phase
            self.gaussianBlur()
            self.colourMapping()
            self.solidRedFilter()
            #self.cannyThresholding()
            self.modeDesc = 'Run gaussianBlur(), colourMapping(), solidRedFilter(), #cannyThresholding'
        elif mode == 4:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_EXTERNAL contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_EXTERNAL)
        elif mode == 5:
            self.modeDesc = 'Run gaussianBlur(), then cannyThresholding with RETR_TREE contour removal mode'
            self.gaussianBlur()
            self.cannyThresholding(cv2.RETR_TREE)
# Apply Heuristics to filter out false
        self.squares = filterContoursRemove(self.img, self.squares)
        return self.squares
Project: piwall-cvtools | Author: infinnovation
def binaryContoursNestingFilterHeuristic(img, cnts, *args, **kwargs):
    '''
    Concept  : Use the found contours, with binary drawn contours to extract hierarchy and hence filter on nesting.
    Critique : WIP
    '''
    # Set the image to black (0): 
    img[:,:] = (0,0,0)
    # Draw all of the contours on the image in white
    contours = [c.contour for c in cnts]
    cv2.drawContours( img, contours, -1, (255, 255, 255), 1 )
    iv = ImageViewer(img)
    iv.windowShow()
    # Now extract any channel
    gray = cv2.split(img)[0]
    iv = ImageViewer(gray)
    iv.windowShow()
    retval, bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Now find the contours again, but this time we care about hierarchy (hence _TREE) - we get back next, previous, first_child, parent
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    # Alternative flags : only take the external contours
    bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    iv = ImageViewer(bin)
    iv.windowShow()
    return cnts
Project: piwall-cvtools | Author: infinnovation
def usage():
    print('''
    piwall.py 
     --vssdemo|-v rotating    : iterate the VideoSquareSearch over rotating video, and output located data in piwall-search-mono.avi
     --vssdemo|-v album       : iterate the VideoSquareSearch over sequence of images, and output located data in album.avi
     --sfv3mode|-s [mode 1-3] : run the SquareLocatorV3 algorithm : set the mode 1-3     < default image 2x2-red-1.jpg >
                               : 1 => call gaussianBlur(); cannyThresholding()
                               : 2 => call gimpMarkup()
                               : 3 => call gaussianBlur(); colourMapping(); solidRedFilter(); [#cannyThresholding]
                               : 4 => as 1, but with cv2.RETR_EXTERNAL as contour_retrieval_mode
                               : 5 => as 1, but with cv2.RETR_TREE as contour_retrieval_mode, then filter only outermost contours
                               : 6 => new model which takes a series of images which have transitions that identify the monitors.
     --sfv3img|-i [image path]: run the SquareFinderV3 algorithm  : set the input image  < default mode 1>
     --sfv4glob|-g [image glob pattern] : set the series of input images to be pattern-[%03d].jpg
    ''')
Project: bib-tagger | Author: KateRita
def find_contours(image):
  #return cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE);
  #return cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
  return cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE);
Project: robot-camera-platform | Author: danionescu0
def find(self, image):
        hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) == 0:
            return (False, False)
        largest_contour = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
        M = cv2.moments(largest_contour)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        return (center, radius)
Project: dminer | Author: infosecanon
def process_captcha(self, image):
        """
        TODO: DOC
        """
        cv2_img = cv2.cvtColor(numpy.array(image), cv2.COLOR_BGR2GRAY)

        # Find the threshold of the image so that we can identify contours.
        ret, thresh = cv2.threshold(
            cv2_img,
            127,
            255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C
        )
        # Find the contours of the image
        _, contours, hierarchy = cv2.findContours(
            thresh,
            cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE
        )

        # Find the largest contour in the image with 4 points. This is the
        # rectangle that is required to crop to for the captcha.
        largest_contour = None
        for contour in contours:
            if (len(cv2.approxPolyDP(contour, 0.1*cv2.arcLength(contour, True), True)) == 4) and (2500 < cv2.contourArea(contour) < 4000):
                if isinstance(largest_contour, type(None)):
                    largest_contour = contour
                    continue
                if cv2.contourArea(contour) > cv2.contourArea(largest_contour):
                    largest_contour = contour
        # If we don't have a matching contour, don't try to crop and such
        if isinstance(largest_contour, type(None)):
            return None

        # If we do have a matching contour, build the rectangle
        crop_x, crop_y, crop_width, crop_height = cv2.boundingRect(
            largest_contour
        )
        # Crop down to the contour rectangle
        image = image.crop(
            (
                crop_x,
                crop_y,
                crop_x + crop_width,
                crop_y + crop_height
            )
        )
        return image
Project: pyceratOpsRecs | Author: USCSoftwareEngineeringClub
def segment(im):
    """
    :param im:
        Image to detect digits and operations in

    """

    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) #grayscale
    blur = cv2.GaussianBlur(gray,(5,5),0) #smooth image to reduce noise
    #adaptive thresholding for different lighting conditions
    thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)

    #################     Now finding Contours     ###################
    image,contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    samples =  np.empty((0,100))
    keys = [i for i in range(48,58)]

    for cnt in contours:
        if cv2.contourArea(cnt) > 20:
            [x,y,w,h] = cv2.boundingRect(cnt)

            #Draw bounding box for it, then resize to 10x10, and store its pixel values in an array
            if  h>1:
                cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
                roi = thresh[y:y+h,x:x+w]
                roismall = cv2.resize(roi,(10,10))
                cv2.imshow('detecting',im)
                key = cv2.waitKey(0)

                if key == 27:  # (escape to quit)
                    sys.exit()
                else: #press any key to continue
                    sample = roismall.reshape((1,100))
                    samples = np.append(samples,sample,0)

    print "segmentation complete"

    cv2.imwrite('data/seg_result.png',im)
    np.savetxt('data/generalsamples.data',samples)
Project: srcsim2017 | Author: ZarjRobotics
def is_detector(self, img):
        """ This uses color to determine if we have a detector, and if so, returns where
            the big screen and smaller screen is in the subimage """
        lower = np.array([190, 190, 0], dtype = "uint8")
        upper = np.array([255, 255, 100], dtype = "uint8")
        mask = cv2.inRange(img, lower, upper)

        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]
        if len(contours) == 0:
            return None, False

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))
        center, radius = cv2.minEnclosingCircle(sorted_contours[0])
        up = True
        if len(contours) > 1:
            center2, radius = cv2.minEnclosingCircle(sorted_contours[1])
            if center2[1] < center[1]:
                up = False

        if self.debug:
            debug_img = img.copy()
            cv2.drawContours(debug_img, [sorted_contours[0]], -1, (0, 255, 0), 2)
            cv2.imshow("cont", debug_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return center, up
Project: srcsim2017 | Author: ZarjRobotics
def is_repair_tool(self, img):
        """ This uses color to determine if we have a repair tool, and if so, returns where
            the button is located within the provided subimage """
        lower = np.array([190, 0, 0], dtype = "uint8")
        upper = np.array([255, 125, 100], dtype = "uint8")
        mask = cv2.inRange(img, lower, upper)

        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]
        if len(contours) == 0:
            return None, False

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))
        center, radius = cv2.minEnclosingCircle(sorted_contours[0])
        up = True
        if center[1] > (img.shape[0] / 2):
            up = False

        if self.debug:
            debug_img = img.copy()
            cv2.drawContours(debug_img, [sorted_contours[0]], -1, (0, 255, 0), 2)
            cv2.imshow("cont", debug_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        return center, up
Project: srcsim2017 | Author: ZarjRobotics
def _find_power_plug_thing(self):
        """ Find the power plug at the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([100, 40, 0], dtype = "uint8")
        upper = np.array([255, 255, 20], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))

        if len(sorted_contours) > 0:
            plug = self._find_a_thing(sorted_contours[0], 0, 0.06, 0, 0.06, 99.0)

            if plug is not None:
                plug.set_power_plug()
                self.things.append(plug)
                self.power_plug = plug

                if self.debug:
                    debug_img = self.img.copy()
                    for c in sorted_contours:
                        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
                    cv2.imshow("plug picture", debug_img)
                    cv2.setMouseCallback("plug picture", self.handle_mouse)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
Project: srcsim2017 | Author: ZarjRobotics
def process_image(self, cv_image, header, tag):
        """ process the image """
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # mask for color range
        if self.color_range:
            mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
            count = cv2.countNonZero(mask)
            if count:
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.dilate(mask, kernel, iterations=2)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

                for i, c in enumerate(contours):
                    x, y, w, h = cv2.boundingRect(c)
                    if self.prefix is not None:
                        name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                           tag,
                                                           header.seq, i)
                        print name
                        roi = cv_image[y:y+h, x:x+w]
                        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                        gray = cv2.equalizeHist(gray)
                        cv2.imwrite(name, gray)

                for c in contours:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
            elif self.prefix is not None:
                name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                        header.seq, )
                cv2.imwrite(name, cv_image)

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
Project: StormCV2017 | Author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: StormCV2017 | Author: 2729StormRobotics
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: Super_TF | Author: Dhruv-Mohan
def getweight(self, mask_mat=None):
        #gray_mask = cv2.cvtColor(mask_mat, cv2.COLOR_BGR2GRAY)
        gray_mask=mask_mat
        ret, bin_mask = cv2.threshold(gray_mask,1,1,0)
        _, contours, _ = cv2.findContours(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        weights = np.zeros_like(bin_mask, dtype=np.float)

        weights = cv2.drawContours(weights, contours, -1, (1), 5)
        weights = cv2.GaussianBlur(weights, (41,41), 1000)
        weights = np.multiply(weights,10)+0.6
        return weights
Project: retinal-exudates-detection | Author: getsanjeev
def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])
    #print(cx,cy)
    return (cx,cy)
Project: retinal-exudates-detection | Author: getsanjeev
def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00']) 
    return (cx,cy)
Project: vision-code | Author: FIRST-Team-1699
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: page_dewarp | Author: mzucker
def get_contours(name, small, pagemask, masktype):

    mask = get_mask(name, small, pagemask, masktype)

    _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_NONE)

    contours_out = []

    for contour in contours:

        rect = cv2.boundingRect(contour)
        xmin, ymin, width, height = rect

        if (width < TEXT_MIN_WIDTH or
                height < TEXT_MIN_HEIGHT or
                width < TEXT_MIN_ASPECT*height):
            continue

        tight_mask = make_tight_mask(contour, xmin, ymin, width, height)

        if tight_mask.sum(axis=0).max() > TEXT_MAX_THICKNESS:
            continue

        contours_out.append(ContourInfo(contour, rect, tight_mask))

    if DEBUG_LEVEL >= 2:
        visualize_contours(name, small, contours_out)

    return contours_out
Project: keras-autoencoder | Author: Rentier
def detect_ball(frame):
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) == 0:
        return

    # find the largest contour in the mask, then use
    # it to compute the minimum enclosing circle and
    # centroid
    c = max(cnts, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

    if radius < 10:
        print('Too small')
        return

    return center, radius
Project: Computer-Vision | Author: PratikRamdasi
def processContoursinGt(path):
    folder = sort_files(path)
    length_cont_gt = []
    for i in range(20,len(folder)):
        newPath = path + "/0 (" + str(i) + ")" + ".png"
        img = cv2.imread(newPath,cv2.CV_LOAD_IMAGE_COLOR)
    #print "Image in processContour: ",img
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (31, 31), 0)
        thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        length_cont_gt.append(len(cnts))
    return length_cont_gt
Project: Computer-Vision | Author: PratikRamdasi
def processContoursinFr(path1):
    folder1 = sort_files_fr(path1)
    length_cont_fr = []
    for i in range(1,len(folder1)):
        newPath = path1 + "/" + str(i)+ ".jpg"
        img = cv2.imread(newPath,cv2.CV_LOAD_IMAGE_COLOR)
    #print "Image in processContourinFr: ",img
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (31, 31), 0)
        thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        length_cont_fr.append(len(cnts))     
    return length_cont_fr
Project: airport | Author: cfircohen
def FindExternalContour(image_bw):
  """Returns the largest external contour."""
  # all external contours
  _, contours, _ = cv2.findContours(image_bw.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
  logging.debug("found {} external contours in image".format(len(contours)))
  # max contour by area size
  largest = max(contours, key=lambda cnt: cv2.contourArea(cnt))
  return Rect(*cv2.boundingRect(largest))
Project: airport | Author: cfircohen
def ShowSolution(images, puzzle, solution, frame, box):
  cell_size = np.array([box.w / 4, box.h / 4])
  for piece_type, piece, i, j in solution:
    top_left_loc = np.array([box.x, box.y]) + (np.array([j, i]) -
                                               np.array([1, 1])) * cell_size
    color = pieces.Colors[piece_type]
    piece_img = np.zeros_like(frame)
    for square in itertools.product(range(2), range(2)):
      if piece[square] == board.SquareType.AIR:
        continue

      loc = top_left_loc + np.array(square[::-1]) * cell_size
      piece_img = cv2.rectangle(piece_img, tuple(loc), tuple(loc + cell_size),
                                color, -2)

      if piece[square] in images:
        image = cv2.resize(images[piece[square]], tuple(cell_size))
        blend = np.zeros_like(piece_img)
        blend[loc[1]:loc[1] + cell_size[1], loc[0]:loc[0] + cell_size[
            0]] = image
        piece_img = cv2.addWeighted(piece_img, 1.0, blend, 1.0, 0)

    piece_gray = cv2.cvtColor(piece_img, cv2.COLOR_RGB2GRAY)
    _, piece_gray = cv2.threshold(piece_gray, 10, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(piece_gray, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    piece_img = cv2.drawContours(piece_img, contours, -1, (255, 255, 255), 3)

    frame = cv2.addWeighted(frame, 1.0, piece_img, 0.7, 0)
    cv2.imshow("Planes", frame)
Project: dust_repos | Author: taozhijiang
def img_contour_extra(im):
    # close small gaps with a rectangular structuring element
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find contours
    # keep only the external (outermost) contours
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL, # external only
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: dust_repos | Author: taozhijiang
def img_contour_extra(im):
    # close small gaps with a rectangular structuring element
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find contours
    # keep only the external (outermost) contours
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL, # external only
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: diagnose-heart | Author: woshialex
def get_patches(segment_arr):
    ret = []
    im = segment_arr.astype(np.uint8)
    contours = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    hulls = [cv2.convexHull(cont) for cont in contours[1]] #seems my version of CV2 (3.0) uses [1]
    for contour_idx in xrange(len(hulls)):
        cimg = np.zeros_like(im)
        cv2.drawContours(cimg, hulls, contour_idx, color=255, thickness=-1)
        pts = np.array(np.where(cimg == 255)).T
        ret.append(pts)
    return ret
Project: diagnose-heart | Author: woshialex
def get_patches(segment_arr):
    ret = []
    im = segment_arr.astype(np.uint8)*255
    contours = cv2.findContours(im, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    hulls = [cv2.convexHull(cont) for cont in contours[0]]
    for contour_idx in xrange(len(hulls)):
        cimg = np.zeros_like(im)
        cv2.drawContours(cimg, hulls, contour_idx, color=255, thickness=-1)
        pts = np.array(np.where(cimg == 255)).T
        ret.append(pts)
    return ret
Project: ATLeS | Author: liffiton
def _get_contours(self):
        # find contours
        _, self._contours, _ = cv2.findContours(self._frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Project: digit-ocr | Author: Nozdi
def find_digits(binary_img):
    inv = cv2.bitwise_not(binary_img)
    contours, hierarchy = cv2.findContours(inv,
                                           cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
    digits = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 500:
            [x, y, w, h] = cv2.boundingRect(cnt)
            margin = 20
            x -= margin
            y -= margin
            w += margin*2
            h += margin*2

            figure = binary_img[y: y + h, x: x + w]
            if figure.size > 0:
                digits.append({
                    'image': figure,
                    'x': x,
                    'y': y,
                    'w': w,
                    'h': h,
                })

    return digits
Project: DoNotSnap | Author: AVGInnovationLabs
def extractRoi(image, winSize, stepSize):
    # hue boundaries
    colors = [
        (15, 30)  # orange-yellow
    ]

    mask, weight_map, mask_scale = roiMask(image, colors)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    yield weight_map, mask_scale

    for resized in pyramid(image, winSize):
        scale = image.shape[0] / resized.shape[0]
        for x, y, w, h in boundingRects(mask_scale, contours):
            x /= scale
            y /= scale
            w /= scale
            h /= scale
            center = (min(x + w / 2, resized.shape[1]), min(y + h / 2, resized.shape[0]))
            if w > winSize[0] or h > winSize[1]:
                for x, y, window in sliding_window(resized, (int(x), int(y), int(w), int(h)), stepSize, winSize):
                    yield ((x, y, winSize[0], winSize[1]), scale, window)
            else:
                x = max(0, int(center[0] - winSize[0] / 2))
                y = max(0, int(center[1] - winSize[1] / 2))
                window = resized[y:y + winSize[1], x:x + winSize[0]]
                yield ((x, y, winSize[0], winSize[1]), scale, window)
Project: image_text_reader | Author: yardstick17
def contour_plot_on_text_in_image(inv_img):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 2))
    dilated = cv2.dilate(inv_img, kernel, iterations=7)  # dilate
    _, contours, hierarchy = cv2.findContours(
        dilated,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_NONE)  # get contours
    return contours
Project: CompetitionBot2017 | Author: Seamonsters-2605
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours
Project: CompetitionBot2017 | Author: Seamonsters-2605
def __find_contours(input, external_only):
        """Sets the values of pixels in a binary image to their distance to the nearest black pixel.
        Args:
            input: A numpy.ndarray.
            external_only: A boolean. If true only external contours are found.
        Return:
            A list of numpy.ndarray where each one represents a contour.
        """
        if(external_only):
            mode = cv2.RETR_EXTERNAL
        else:
            mode = cv2.RETR_LIST
        method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method)
        return contours