Python cv2 module: getStructuringElement() example source code

We have extracted the following 50 code examples from open-source Python projects to show how cv2.getStructuringElement() is used.
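
Every snippet below follows the same pattern: build a kernel with cv2.getStructuringElement() and pass it to erode, dilate, or morphologyEx. As a quick orientation, here is a minimal, self-contained sketch of the call itself (the kernel sizes and the toy image are arbitrary and not taken from any of the projects):

import cv2
import numpy as np

# The three built-in kernel shapes, all 5x5.
rect_k    = cv2.getStructuringElement(cv2.MORPH_RECT,    (5, 5))   # solid block of ones
ellipse_k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))   # filled ellipse of ones
cross_k   = cv2.getStructuringElement(cv2.MORPH_CROSS,   (5, 5))   # ones on the centre row/column only

# Typical use: pass the kernel to a morphology operation.
img = np.zeros((64, 64), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (44, 44), 255, -1)
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, ellipse_k)

# An explicit anchor can also be given; the default is the kernel centre.
corner_anchored = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3), anchor=(0, 0))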

Project: Speedy-TSLSR    Author: talhaHavadar    | project source | file source
def __bound_contours(roi):
    """
        Returns a modified copy of roi (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest in which to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # get largest five contour area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)

    return (roi_copy, rects)
Project: Robo-Plot    Author: JackBuck    | project source | file source
def _extract_spots(self) -> None:
        # Dilate and Erode to 'clean' the spot (nb that this harms the number itself, so we only do it to extract spots)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        img = cv2.dilate(self._img, kernel, iterations=1)
        img = cv2.erode(img, kernel, iterations=2)
        img = cv2.dilate(img, kernel, iterations=1)

        # Perform a simple blob detect
        params = cv2.SimpleBlobDetector_Params()
        params.filterByArea = True
        params.minArea = 20  # The dot in 20pt font has area of about 30
        params.filterByCircularity = True
        params.minCircularity = 0.7
        params.filterByConvexity = True
        params.minConvexity = 0.8
        params.filterByInertia = True
        params.minInertiaRatio = 0.4
        detector = cv2.SimpleBlobDetector_create(params)
        self.spot_keypoints = detector.detect(img)

        # Log intermediate image
        img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]), color=(0, 0, 255),
                                               flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))
Project: SudokuSolver    Author: Anve94    | project source | file source
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated
Project: dvd    Author: ajayrfhp    | project source | file source
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            if output[2][i][4] >= min_thresh and output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2], output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        # a waitKey call is needed for imshow to actually render and to allow quitting
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Project: Speedy-TSLSR    Author: talhaHavadar    | project source | file source
def __filterRedColor(image_hsv):
    """
        Filters the red color from image_hsv and returns mask.
    """
    mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
    mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
    mask = mask1 + mask2
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    mask = cv2.Canny(mask, 50, 100)
    mask = cv2.GaussianBlur(mask, (13, 13), 0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    return mask
Project: esys-pbi    Author: fsxfreak    | project source | file source
def dif_gaus(image, lower, upper):
        lower, upper = int(lower-1), int(upper-1)
        lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0)
        upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0)
        # upper +=50
        # lower +=50
        dif = lower-upper
        # dif *= .1
        # dif = cv2.medianBlur(dif,3)
        # dif = 255-dif
        dif = cv2.inRange(dif, np.asarray(200),np.asarray(256))
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
        dif = cv2.dilate(dif, kernel, iterations=2)
        dif = cv2.erode(dif, kernel, iterations=1)
        # dif = cv2.max(image,dif)
        # dif = cv2.dilate(dif, kernel, iterations=1)
        return dif
Project: esys-pbi    Author: fsxfreak    | project source | file source
def erase_specular(image,lower_threshold=0.0, upper_threshold=150.0):
    """erase_specular: removes specular reflections
            within given threshold using a binary mask (hi_mask)
    """
    thresh = cv2.inRange(image,
                np.asarray(float(lower_threshold)),
                np.asarray(256.0))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
    hi_mask = cv2.dilate(thresh, kernel, iterations=2)

    specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA)
    # return cv2.max(hi_mask,image)
    return specular
Project: DeepFryBot    Author: asdvek    | project source | file source
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png') # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords


# find list of eye coordinates in image
Project: cvloop    Author: shoeffner    | project source | file source
def __init__(self, structuring_element=None):
        """Initializes the `BackgroundSubtractorGMG`.

        *Note:* Requires OpenCV to be built with `--contrib` as it uses the
        `bgsegm` package.

        Unless a custom `structuring_element` is specified, it uses:
            `cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))`

        Args:
            structuring_element: The structuring element.
        """
        if structuring_element is None:
            self.strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        else:
            self.strel = structuring_element
        self.fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()  # noqa: E501 pylint: disable=no-member,line-too-long
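
The class above only constructs the subtractor and stores the kernel; for context, here is a hedged sketch of how such a GMG filter is typically applied frame by frame (the video path and the display loop are illustrative and not part of the cvloop project; cv2.bgsegm requires opencv-contrib):

import cv2

cap = cv2.VideoCapture('video.mp4')  # illustrative input path
strel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    # opening with the small elliptical kernel suppresses isolated noise pixels
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, strel)
    cv2.imshow('foreground', fgmask)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()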
Project: doc2text    Author: jlsutherland    | project source | file source
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    dilation = dilate(im, kernel, 6)

    count = 21
    n = 0
    sigma = 0.000

    while count > max_components:
        n += 1
        sigma += 0.005
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(result) == 3:
            _, contours, hierarchy = result
        elif len(result) == 2:
            contours, hierarchy = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)

    return (dilation, possible, n)
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def calculate_entropy(image):
    entropy = image.copy()
    sum = 0
    i = 0
    j = 0
    while i < entropy.shape[0]:
        j = 0
        while j < entropy.shape[1]:
            sub_image = entropy[i:i+10,j:j+10]
            histogram = cv2.calcHist([sub_image],[0],None,[256],[0,256])
            sum = 0
            for k in range(256):
                if histogram[k] != 0:                   
                    sum = sum + (histogram[k] * math.log(histogram[k]))
                k = k + 1
            entropy[i:i+10,j:j+10] = sum
            j = j+10
        i = i+10
    ret2,th2 = cv2.threshold(entropy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    newfin = cv2.erode(th2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    return newfin
Project: idmatch    Author: maddevsio    | project source | file source
def recognize_text(original):
    idcard = original
    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)

    # Morphological gradient:
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opening = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)

    # Binarization
    ret, binarization = cv2.threshold(opening, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # Connected horizontally oriented regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(binarization, cv2.MORPH_CLOSE, kernel)

    # find countours
    _, contours, hierarchy = cv2.findContours(
        connected, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
    )
    return contours, hierarchy
Project: 2017-robot    Author: frc1418    | project source | file source
def preallocate(self, img):
        if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]:
            h, w = img.shape[:2]
            self.size = (h, w)

            self.img = np.empty((h, w, 3), dtype=np.uint8)

            self.hsv = np.empty((h, w, 3), dtype=np.uint8)
            self.bin = np.empty((h, w, 1), dtype=np.uint8)
            self.bin2 = np.empty((h, w, 1), dtype=np.uint8)

            self.out = np.empty((h, w, 3), dtype=np.uint8)

            # for overlays
            self.zeros = np.zeros((h, w, 1), dtype=np.bool)
            self.black = np.zeros((h, w, 3), dtype=np.uint8)

            self.morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2), anchor=(0,0))

        cv2.copyMakeBorder(img, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=self.RED, dst=self.out)
Project: python-opencv2    Author: bunkahle    | project source | file source
def update(dummy=None):
        sz = cv2.getTrackbarPos('op/size', 'morphology')
        iters = cv2.getTrackbarPos('iters', 'morphology')
        opers = cur_mode.split('/')
        if len(opers) > 1:
            sz = sz - 10
            op = opers[sz > 0]
            sz = abs(sz)
        else:
            op = opers[0]
        sz = sz*2+1

        str_name = 'MORPH_' + cur_str_mode.upper()
        oper_name = 'MORPH_' + op.upper()
        st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
        res = cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)

        draw_str(res, (10, 20), 'mode: ' + cur_mode)
        draw_str(res, (10, 40), 'operation: ' + oper_name)
        draw_str(res, (10, 60), 'structure: ' + str_name)
        draw_str(res, (10, 80), 'ksize: %d  iters: %d' % (sz, iters))
        cv2.imshow('morphology', res)
Project: ATLeS    Author: liffiton    | project source | file source
def __init__(self):
        super(TargetFilterBGSub, self).__init__()

        # background subtractor
        #self._bgs = cv2.BackgroundSubtractorMOG()
        #self._bgs = cv2.BackgroundSubtractorMOG2()  # not great defaults, and need bShadowDetection to be False
        #self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)

        # varThreshold: higher values detect fewer/smaller changed regions
        self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)

        # ??? history is ignored?  Only if learning_rate is > 0, or...?  Unclear.

        # Learning rate for background subtractor.
        # 0 = never adapts after initial background creation.
        # A bit above 0 looks good.
        # Lower values are better for detecting slower movement, though it
        # takes a bit of time to learn the background initially.
        self._learning_rate = 0.001

        # elements to reuse in erode/dilate
        # CROSS elimates more horizontal/vertical lines and leaves more
        # blobs with extent in both axes [than RECT].
        self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
        self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
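
To illustrate the shrink-then-grow pattern these two elements are built for, here is a minimal sketch on a synthetic binary mask (the mask and the single-pass sequence are illustrative, not code from the ATLeS project):

import cv2
import numpy as np

mask = np.zeros((120, 160), dtype=np.uint8)
mask[40:80, 60:100] = 255          # a blob with extent in both axes survives
mask[10, :] = 255                  # a thin horizontal line is wiped out by the erosion

element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))

# erode with the cross to break thin horizontal/vertical streaks,
# then dilate with the ellipse to restore the surviving blobs
cleaned = cv2.dilate(cv2.erode(mask, element_shrink), element_grow)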
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def process(img):
    img=cv2.medianBlur(img,5)
    kernel=np.ones((3,3),np.uint8)

    #img=cv2.erode(img,kernel,iterations = 1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations = 1)
    erosion = cv2.erode(dilation, element1, iterations = 1)
    dilation2 = cv2.dilate(erosion, element2,iterations = 3)
    #img=cv2.dilate(img,kernel,iterations = 1)
    #img=cv2.Canny(img,100,200)
    return dilation2
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def logoDetect(img,imgo):
    '''Locate the vehicle logo region above the licence plate and crop it from the original image.'''
    imglogo=imgo.copy()
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
    #img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
    img=cv2.Canny(img,100,200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2,iterations = 1)
    img = cv2.erode(img, element1, iterations = 3)
    img = cv2.dilate(img, element2,iterations = 3)

    # find contours of the candidate logo regions
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema=0
    result=[]
    for con in contours:
        x,y,w,h=cv2.boundingRect(con)
        area=w*h
        ratio=max(w/h,h/w)
        if area>300 and area<20000 and ratio<2:
            if area>tema:
                tema=area
                result=[x,y,w,h]
                ratio2=ratio
    # map the detected box back to original-image coordinates (the image was upscaled 2x) and offset by the plate position
    logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
    logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
    cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
    cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
    print(tema, ratio2, result)
    logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg',logo2)

    return img
Project: Brewereader    Author: ceafdc    | project source | file source
def find_lines(img, acc_threshold=0.25, should_erode=True):
    if len(img.shape) == 3 and img.shape[2] == 3:  # if it's color
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = cv2.adaptiveThreshold(
            img,
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            5,
            2)

    img = cv2.bitwise_not(img)

    # thresh = 127
    # edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
    # edges = cv2.Canny(blur, 500, 500, apertureSize=3)

    if should_erode:
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
        img = cv2.erode(img, element)

    theta = np.pi/2000
    angle_threshold = 2
    horizontal = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[1]),
            min_theta=np.radians(90 - angle_threshold),
            max_theta=np.radians(90 + angle_threshold))
    vertical = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[0]),
            min_theta=np.radians(-angle_threshold),
            max_theta=np.radians(angle_threshold),
            )

    horizontal = list(horizontal) if horizontal is not None else []
    vertical = list(vertical) if vertical is not None else []

    horizontal = [line[0] for line in horizontal]
    vertical = [line[0] for line in vertical]

    horizontal = np.asarray(horizontal)
    vertical = np.asarray(vertical)

    return horizontal, vertical
Project: opentrack-prototyping    Author: DaMichel    | project source | file source
def simple_feature_size_filter(img, minradius, maxradius):
    feature_radius_min = minradius | 1  # play with these to see how they affect highlighting of structures of various sizes
    feature_radius_max = maxradius | 1

    if 0:
        w = feature_radius_min*2 | 1
        blurred = cv2.GaussianBlur(img, (w, w), feature_radius_min)

        w = feature_radius_max*2 | 1
        veryblurred = cv2.GaussianBlur(img, (w, w), feature_radius_max)
    else:
        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_min, feature_radius_min))
        blurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_max, feature_radius_max))
        veryblurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

    bandfiltered = blurred - np.minimum(veryblurred, blurred)
    return bandfiltered
Project: esys-pbi    Author: fsxfreak    | project source | file source
def equalize(image, image_lower=0.0, image_upper=255.0):
    image_lower = int(image_lower*2)/2
    image_lower +=1
    image_lower = max(3,image_lower)
    mean = cv2.medianBlur(image,255)
    image = image - (mean-100)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    # cv2.dilate(image, kernel, image, iterations=1)
    return image
Project: piwall-cvtools    Author: infinnovation    | project source | file source
def denoise_foreground(img, fgmask):
    img_bw = 255*(fgmask > 5).astype('uint8')
    se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
    mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
    mask = np.dstack([mask, mask, mask]) / 255
    img_dn = img * mask
    return img_dn
Project: Stereo-Pose-Machines    Author: ppwwyyxx    | project source | file source
def __init__(self, bgim):
        if not isinstance(bgim, list):
            bgim = [bgim]
        bgim = np.asarray(bgim)
        self.bgim = bgim.mean(axis=0).astype('float32')
        self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
        self.dilate_k =  cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
Project: pycolor_detection    Author: parth1993    | project source | file source
def closing(mask):
    assert isinstance(mask, numpy.ndarray), 'mask must be a numpy array'
    assert mask.ndim == 2, 'mask must be a greyscale image'
    logger.debug("closing mask of shape {0}".format(mask.shape))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    return mask
Project: BlurDetection    Author: whdcumt    | project source | file source
def morphology(msk):
    assert isinstance(msk, numpy.ndarray), 'msk must be a numpy array'
    assert msk.ndim == 2, 'msk must be a greyscale image'
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    msk = cv2.erode(msk, kernel, iterations=1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    msk = cv2.morphologyEx(msk, cv2.MORPH_CLOSE, kernel)
    msk[msk < 128] = 0
    msk[msk > 127] = 255
    return msk
Project: SynthText    Author: ankush-me    | project source | file source
def border(self, alpha, size, kernel_type='RECT'):
        """
        alpha : alpha layer of the text
        size  : size of the kernel
        kernel_type : one of [rect,ellipse,cross]

        @return : alpha layer of the border (color to be added externally).
        """
        kdict = {'RECT':cv.MORPH_RECT, 'ELLIPSE':cv.MORPH_ELLIPSE,
                 'CROSS':cv.MORPH_CROSS}
        kernel = cv.getStructuringElement(kdict[kernel_type],(size,size))
        border = cv.dilate(alpha,kernel,iterations=1) # - alpha
        return border
Project: doc2text    Author: jlsutherland    | project source | file source
def reduce_noise_edges(im):
    structuring_element = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, structuring_element)
    maxed_rows = rank_filter(opening, -4, size=(1, 20))
    maxed_cols = rank_filter(opening, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(opening, maxed_rows), maxed_cols)
    return debordered
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])
    #print(cx,cy)
    return (cx,cy)
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def edge_pixel_image(image,bv_image):
    edge_result = image.copy()
    edge_result = cv2.Canny(edge_result,30,100) 
    i = 0
    j = 0
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            if edge_result[i,j] == 255 and bv_image[i,j] == 255:
                edge_result[i,j] = 0
            j = j+1
        i = i+1
    newfin = cv2.dilate(edge_result, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    return newfin
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def extract_bv(image):          
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and unwanted bigger chunks, on the assumption that they are not
    # straight lines like blood vessels and that their area falls within a given interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def identify_OD(image):
    newfin = cv2.dilate(image, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations=2)
    mask = np.ones(newfin.shape[:2], dtype="uint8") * 255
    y1, ycontours, yhierarchy = cv2.findContours(newfin.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    prev_contour = ycontours[0]
    for cnt in ycontours:
        if cv2.contourArea(cnt) >= cv2.contourArea(prev_contour):
            prev_contour = cnt
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    M = cv2.moments(prev_contour)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00']) 
    return (cx,cy)
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def edge_pixel_image(image,bv_image):
    edge_result = image.copy()
    edge_result = cv2.Canny(edge_result,30,100) 
    i = 0
    j = 0
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            if edge_result[i,j] == 255 and bv_image[i,j] == 255:
                edge_result[i,j] = 0
            j = j+1
        i = i+1
    newfin = cv2.dilate(edge_result, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    return newfin
Project: retinal-exudates-detection    Author: getsanjeev    | project source | file source
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # remove microaneurysm blobs and unwanted bigger chunks, on the assumption that they are not
    # straight lines like blood vessels and that their area falls within a given interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: UVA    Author: chiachun    | project source | file source
def skin_filter(cfg, vd):
    df = pd.read_csv(vd.photo_csv, index_col=0)
    numbers = df.number.tolist()
    notface = []
    for number in numbers:
        lower = np.array([0, 48, 80], dtype = "uint8")
        upper = np.array([13, 255, 255], dtype = "uint8")
        image = cv2.imread('%s/%d.png' % (vd.photo_dir, number), cv2.IMREAD_COLOR)
        converted = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        skinMask = cv2.inRange(converted, lower, upper)

        # apply a series of erosions and dilations to the mask
        # using an elliptical kernel
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        skinMask = cv2.erode(skinMask, kernel, iterations = 2)
        skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

        # blur the mask to help remove noise, then apply the
        # mask to the frame
        skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
        skin = cv2.bitwise_and(image, image, mask = skinMask)
        if len(skin.nonzero()[0]) < cfg.min_skin_pixels:
            notface.append(number)
    print('%d/%d are faces' % (len(df) - len(notface), len(df)))
    df['face']= 1
    df.loc[df.number.isin(notface),'face'] = -99
    df.to_csv(vd.photo_csv)
Project: hand-gesture-recognition-opencv    Author: mahaveerverma    | project source | file source
def hand_threshold(frame_in,hand_hist):
    frame_in=cv2.medianBlur(frame_in,3)
    hsv=cv2.cvtColor(frame_in,cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end*hsv.shape[0]),0:int(cap_region_x_begin*hsv.shape[1])]=0 # Right half screen only
    hsv[int(cap_region_y_end*hsv.shape[0]):hsv.shape[0],0:hsv.shape[1]]=0
    back_projection = cv2.calcBackProject([hsv], [0,1],hand_hist, [00,180,0,256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size,morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection=cv2.GaussianBlur(back_projection,(gaussian_ksize,gaussian_ksize), gaussian_sigma)
    back_projection=cv2.medianBlur(back_projection,median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)

    return thresh

# 3. Find hand contour
Project: ghetto_omr    Author: pohzhiee    | project source | file source
def outlining(img):
    #kernel size
    kernel_size=3
    #-------------------------------------------------
    #bilateral filter, sharpen, thresh image
    biblur=cv2.bilateralFilter(img,20,175,175)
    sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
    ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)

    #negative and closed image
    inv=cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
Project: DeHaze    Author: XierHacker    | project source | file source
def DarkChannel(im, sz):
    b, g, r = cv2.split(im)
    dc = cv2.min(cv2.min(r, g), b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    dark = cv2.erode(dc, kernel)
    return dark
Project: DeHaze    Author: XierHacker    | project source | file source
def DarkChannel(im,sz):
    b,g,r = cv2.split(im)
    dc = cv2.min(cv2.min(r,g),b)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(sz,sz))
    dark = cv2.erode(dc,kernel)
    return dark
Project: pytesseractID    Author: iChenwin    | project source | file source
def dealImage(image, thresh):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 1))
    dilate = cv2.dilate(image, kernel)

    gray = cv2.cvtColor(dilate, cv2.COLOR_RGB2GRAY)
    return cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]

Project: pytesseractID    Author: iChenwin    | project source | file source
def dealImage(image, thresh):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 1))
    dilate = cv2.dilate(image, kernel)

    gray = cv2.cvtColor(dilate, cv2.COLOR_RGB2GRAY)
    return cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]

Project: dust_repos    Author: taozhijiang    | project source | file source
def img_contour_extra(im):
    # morphological closing with a wide rectangular kernel to merge nearby foreground into blocks
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find the external contours of the merged regions
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: dust_repos    Author: taozhijiang    | project source | file source
def img_contour_extra(im):
    # morphological closing with a wide rectangular kernel to merge nearby foreground into blocks
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find the external contours of the merged regions
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: eclipse2017    Author: google    | project source | file source
def find_contours(self, image):
        image = qimage_to_numpy(image)
        gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        #_,thresh = cv2.threshold(gray,150,255,cv2.THRESH_BINARY_INV)
        # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
        # dilated = cv2.dilate(gray,kernel,iterations = 13)
        contours, hierarchy = cv2.findContours(gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        return contours
Project: ATLeS    Author: liffiton    | project source | file source
def __init__(self):
        super(TargetFilterBrightness, self).__init__()

        # elements to reuse in erode/dilate
        # CROSS elimates more horizontal/vertical lines and leaves more
        # blobs with extent in both axes [than RECT].
        self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
        self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
Project: digit-ocr    Author: Nozdi    | project source | file source
def resize_digits(digits):
    digits = map(itemgetter('image'), sorted(digits, key=itemgetter('x')))
    blur_kernel = np.ones((4, 4), np.float32)/(4*4)
    erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return [
        cv2.resize(
            cv2.bitwise_not(
                cv2.filter2D(
                    cv2.erode(digit, erode_kernel, iterations=1),
                    -1, blur_kernel)
            ),
            (20, 20))
        for digit in digits]
Project: DoNotSnap    Author: AVGInnovationLabs    | project source | file source
def extractEdges(hue, intensity):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    edges = cv2.Canny(intensity, 120, 140)
    hue_edges = cv2.Canny(cv2.GaussianBlur(hue, (5, 5), 0), 0, 255)
    combined_edges = cv2.bitwise_or(hue_edges, edges)
    _, mask = cv2.threshold(combined_edges, 40, 255, cv2.THRESH_BINARY)
    return cv2.erode(cv2.GaussianBlur(mask, (3, 3), 0), kernel, iterations=1)
Project: DoNotSnap    Author: AVGInnovationLabs    | project source | file source
def roiFromEdges(edges):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    small_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # close gaps in edges to create continous regions
    roi = cv2.dilate(edges, small_kernel, iterations=14)
    return cv2.erode(roi, kernel, iterations=4)
Project: DoNotSnap    Author: AVGInnovationLabs    | project source | file source
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))

    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)

    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)

    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)

    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)

    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))

    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)

    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def process(img):
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    gau=cv2.GaussianBlur(gray,(5,5),0)
    ret,thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med=cv2.medianBlur(thre,5)
    canny=cv2.Canny(thre,100,200)
    #sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize = 3)
    dilation=cv2.dilate(canny,element2,iterations = 1)
    dst=cv2.erode(dilation, element1, iterations = 1)
    return dst
Project: deep_ocr    Author: JinpengLI    | project source | file source
def check_if_good_boundary(self, boundary, norm_height, norm_width, color_img):
        preprocess_bg_mask = PreprocessBackgroundMask(boundary)
        char_w = norm_width // 20
        remove_noise = PreprocessRemoveNonCharNoise(char_w)

        id_card_img_mask = preprocess_bg_mask.do(color_img)
        id_card_img_mask[0:int(norm_height*0.05),:] = 0
        id_card_img_mask[int(norm_height*0.95): ,:] = 0
        id_card_img_mask[:, 0:int(norm_width*0.05)] = 0
        id_card_img_mask[:, int(norm_width*0.95):] = 0

        remove_noise.do(id_card_img_mask)

#        se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
#        se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
#        mask = cv2.morphologyEx(id_card_img_mask, cv2.MORPH_CLOSE, se1)
#        id_card_img_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
#  
        ## remove right head profile
        left_half_id_card_img_mask = np.copy(id_card_img_mask)
        left_half_id_card_img_mask[:, norm_width//2:] = 0

        ## Try to find text lines and chars
        horizontal_sum = np.sum(left_half_id_card_img_mask, axis=1)
        line_ranges = extract_peek_ranges_from_array(horizontal_sum)

        return len(line_ranges) >= 5 and len(line_ranges) <= 7
Project: image_text_reader    Author: yardstick17    | project source | file source
def contour_plot_on_text_in_image(inv_img):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 2))
    dilated = cv2.dilate(inv_img, kernel, iterations=7)  # dilate
    _, contours, hierarchy = cv2.findContours(
        dilated,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_NONE)  # get contours
    return contours