Python cv2 module: morphologyEx() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.morphologyEx().
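
All of the snippets below share the same basic pattern: build a structuring element, then pass it to cv2.morphologyEx() together with an operation flag such as cv2.MORPH_OPEN, cv2.MORPH_CLOSE or cv2.MORPH_GRADIENT. The minimal sketch below illustrates that pattern; the file name "input.png" is a placeholder and is not taken from any of the projects.

import cv2

# Read a grayscale image and binarize it ("input.png" is a placeholder path).
img = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)
_, mask = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# Build a structuring element and clean the binary mask:
# opening removes small speckles, closing fills small holes.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)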

Project: Speedy-TSLSR    Author: talhaHavadar
def __bound_contours(roi):
    """
        Returns a modified copy of roi (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # keep the five largest contours by area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1);

    return (roi_copy, rects)
Project: dvd    Author: ajayrfhp
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            if output[2][i][4] >= min_thresh and output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2], output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        # imshow needs a waitKey call to refresh the window; press Esc to stop early
        if cv2.waitKey(30) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
Project: dream2016_dm    Author: lishen
def select_largest_obj(self, img_bin, lab_val=255, fill_holes=False, 
                           smooth_boundary=False, kernel_size=15):
        '''Select the largest object from a binary image and optionally
        fill holes inside it and smooth its boundary.
        Args:
            img_bin (2D array): 2D numpy array of binary image.
            lab_val ([int]): integer value used for the label of the largest 
                    object. Default is 255.
            fill_holes ([boolean]): whether to fill the holes inside the largest
                    object or not. Default is false.
            smooth_boundary ([boolean]): whether to smooth the boundary of the
                    largest object using morphological opening or not. Default
                    is false.
            kernel_size ([int]): the size of the kernel used for morphological 
                    operation. Default is 15.
        Returns:
            a binary image as a mask for the largest object.
        '''
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(img_bin, connectivity=8, 
                                             ltype=cv2.CV_32S)
        largest_obj_lab = np.argmax(lab_stats[1:, 4]) + 1
        largest_mask = np.zeros(img_bin.shape, dtype=np.uint8)
        largest_mask[img_labeled == largest_obj_lab] = lab_val
        # import pdb; pdb.set_trace()
        if fill_holes:
            bkg_locs = np.where(img_labeled == 0)
            bkg_seed = (bkg_locs[0][0], bkg_locs[1][0])
            img_floodfill = largest_mask.copy()
            h_, w_ = largest_mask.shape
            mask_ = np.zeros((h_ + 2, w_ + 2), dtype=np.uint8)
            cv2.floodFill(img_floodfill, mask_, seedPoint=bkg_seed, 
                          newVal=lab_val)
            holes_mask = cv2.bitwise_not(img_floodfill)  # mask of the holes.
            largest_mask = largest_mask + holes_mask
        if smooth_boundary:
            kernel_ = np.ones((kernel_size, kernel_size), dtype=np.uint8)
            largest_mask = cv2.morphologyEx(largest_mask, cv2.MORPH_OPEN, 
                                            kernel_)

        return largest_mask
Project: Speedy-TSLSR    Author: talhaHavadar
def __filterRedColor(image_hsv):
    """
        Filters the red color from image_hsv and returns mask.
    """
    mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
    mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
    mask = mask1 + mask2
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    mask = cv2.Canny(mask, 50, 100)
    mask = cv2.GaussianBlur(mask, (13, 13), 0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    return mask
Project: CE264-Computer_Vision    Author: RobinCPC
def skin_detect(self, raw_yrb, img_src):
        # use median blurring to remove signal noise in YCRCB domain
        raw_yrb = cv2.medianBlur(raw_yrb, 5)
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)

        # morphological transform to remove unwanted part
        kernel = np.ones((5, 5), np.uint8)
        #mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
        mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)

        res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
        #res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7,21)

        return res_skin


# Do background subtraction with some filtering
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,opening)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(opening,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=opening
Project: reconstruction    Author: microelly2
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = obj.iterations)


        if True:
            print "zeige"
            cv2.imshow(obj.Label,closing)
            print "gezeigt"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(closing,cmap = 'gray')
            plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "fertig"
        self.img=closing
Project: PyFRAP    Author: alexblaessle
def getContours(img,kernel=(10,10)):

    #Define kernel
    kernel = np.ones(kernel, np.uint8)

    #Open to erode small patches
    thresh = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

    #Close little holes
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,kernel, iterations=4)

    #Find contours
    #contours=skimsr.find_contours(thresh,0)

    thresh=thresh.astype('uint8')
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)

    areas=[]
    for c in contours:
        areas.append(cv2.contourArea(c))

    return contours,thresh,areas
Project: idmatch    Author: maddevsio
def recognize_text(original):
    idcard = original
    gray = cv2.cvtColor(idcard, cv2.COLOR_BGR2GRAY)

    # Morphological gradient:
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opening = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)

    # Binarization
    ret, binarization = cv2.threshold(opening, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # Connected horizontally oriented regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(binarization, cv2.MORPH_CLOSE, kernel)

    # find countours
    _, contours, hierarchy = cv2.findContours(
        connected, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE
    )
    return contours, hierarchy
Project: tbotnav    Author: patilnabhi
def _extract_arm(self, img):
        # find center region of image frame (assume center region is 21 x 21 px)
        center_half = 10 # (=(21-1)/2)  
        center = img[self.height/2 - center_half : self.height/2 + center_half, self.width/2 - center_half : self.width/2 + center_half]

        # determine median depth value
        median_val = np.median(center)

        '''mask the image such that all pixels whose depth values
        lie within a particular range are gray and the rest are black
        '''

        img = np.where(abs(img-median_val) <= self.abs_depth_dev, 128, 0).astype(np.uint8)

        # Apply morphology operation to fill small holes in the image
        kernel = np.ones((5,5), np.uint8)
        img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

        # Find connected regions (to hand) to remove background objects
        # Use floodFill from a small central image area (7 x 7 px) that is set to the gray color value
        kernel2 = 3
        img[self.height/2-kernel2:self.height/2+kernel2, self.width/2-kernel2:self.width/2+kernel2] = 128

        # a black mask to mask the 'non-connected' components black
        mask = np.zeros((self.height + 2, self.width + 2), np.uint8)
        floodImg = img.copy()

        # Use floodFill function to paint the connected regions white 
        cv2.floodFill(floodImg, mask, (self.width/2, self.height/2), 255, flags=(4 | 255 << 8))

        # apply a binary threshold to show only connected hand region
        ret, floodedImg = cv2.threshold(floodImg, 129, 255, cv2.THRESH_BINARY)

        return floodedImg
Project: python-opencv2    Author: bunkahle
def update(dummy=None):
        sz = cv2.getTrackbarPos('op/size', 'morphology')
        iters = cv2.getTrackbarPos('iters', 'morphology')
        opers = cur_mode.split('/')
        if len(opers) > 1:
            sz = sz - 10
            op = opers[sz > 0]
            sz = abs(sz)
        else:
            op = opers[0]
        sz = sz*2+1

        str_name = 'MORPH_' + cur_str_mode.upper()
        oper_name = 'MORPH_' + op.upper()
        st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz))
        res = cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters)

        draw_str(res, (10, 20), 'mode: ' + cur_mode)
        draw_str(res, (10, 40), 'operation: ' + oper_name)
        draw_str(res, (10, 60), 'structure: ' + str_name)
        draw_str(res, (10, 80), 'ksize: %d  iters: %d' % (sz, iters))
        cv2.imshow('morphology', res)
Project: image_text_reader    Author: yardstick17
def remove_noise_and_smooth(file_name):
    logging.info('Removing noise and smoothing image')
    img = cv2.imread(file_name, 0)
    filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41, 3)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    img = image_smoothening(img)
    or_image = cv2.bitwise_or(img, closing)
    return or_image
Project: opentrack-prototyping    Author: DaMichel
def simple_feature_size_filter(img, minradius, maxradius):
    feature_radius_min = minradius | 1 # play with these to see how they affect highlighting of structures of various sizes
    feature_radius_max = maxradius | 1

    if 0:
        w = feature_radius_min*2 | 1
        blurred = cv2.GaussianBlur(img, (w, w), feature_radius_min)

        w = feature_radius_max*2 | 1
        veryblurred = cv2.GaussianBlur(img, (w, w), feature_radius_max)
    else:
        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_min, feature_radius_min))
        blurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_max, feature_radius_max))
        veryblurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

    bandfiltered = blurred - np.minimum(veryblurred, blurred)
    return bandfiltered
Project: osrmacro    Author: jjvilm
def isInvEmpty():
    bag, bagx,bagy = get_bag('bag and coords', 'hsv')
    # looks for color of empty inv
    low = np.array([10,46,58])
    high= np.array([21,92,82])
    # applies mask
    mask = cv2.inRange(bag, low, high)
    # removes any noise
    kernel = np.ones((5,5), np.uint8)
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    # return True if the inventory area is all white (empty) pixels, else False
    if (closing.view() == 255).all():
        return True
    return False
Project: document-layout-analysis    Author: rbaguila
def process_letter(thresh,output):  
    # assign the kernel size    
    kernel = np.ones((2,1), np.uint8) # vertical
    # use closing morph operation then erode to narrow the image    
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)
    # temp_img = cv2.erode(thresh,kernel,iterations=2)      
    letter_img = cv2.erode(temp_img,kernel,iterations=1)

    # find contours 
    (contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    # loop in all the contour areas
    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   


#processing letter by letter boxing
Project: document-layout-analysis    Author: rbaguila
def process_word(thresh,output):    
    # assign two rectangular kernels: one vertical and one horizontal
    kernel = np.ones((2,1), np.uint8)
    kernel2 = np.ones((1,4), np.uint8)
    # use closing morph operation with fewer iterations than for letters, then dilate to widen the words
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    word_img = cv2.dilate(temp_img,kernel2,iterations=1)

    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing line by line boxing
Project: document-layout-analysis    Author: rbaguila
def process_line(thresh,output):    
    # assign two rectangular kernels: a horizontal one for dilation and a larger one for closing
    kernel = np.ones((1,5), np.uint8)
    kernel2 = np.ones((2,4), np.uint8)  
    # use closing morph operation with fewer iterations than for letters, then dilate to connect text into lines
    temp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel2,iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)   
    line_img = cv2.dilate(temp_img,kernel,iterations=5)

    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)

    return output   

#processing par by par boxing
Project: TableSoccerCV    Author: StudentCV
def _smooth_ball_mask(self, mask):
        """
        The mask created in DetectBallPosition might be noisy.
        :param mask: The mask to smooth (Image with bit depth 1)
        :return: The smoothed mask
        """
        # create the disk-shaped kernel for the following image processing,
        r = 3
        kernel = np.ones((2*r, 2*r), np.uint8)
        for x in range(0, 2*r):
            for y in range(0, 2*r):
                if(x - r + 0.5)**2 + (y - r + 0.5)**2 > r**2:
                    kernel[x, y] = 0

        # remove noise
        # see http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        return mask
Project: TableSoccerCV    Author: StudentCV
def SmoothFieldMask(self, mask):
        # Close first and then DILATE gives good detection of the outline at the top

        kernel = np.ones((20,20),np.uint8)


        kernel = np.ones((5,5),np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        #kernel = np.ones((20,20),np.uint8)
    #mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel)
        #kernel = np.ones((20,20),np.uint8)

        mask = cv2.GaussianBlur(mask,(11,11),0)

        #mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel)

    #    plt.imshow(cv2.cvtColor(cv2.bitwise_and(self.ImgHSV,self.ImgHSV,mask=mask),cv2.COLOR_HSV2RGB),cmap="gray")
     #   plt.show()

        return mask
Project: hazcam    Author: alex-sherman
def update_edge_mask(self, previous_mask, previous_line, slope_sign, thrs1, thrs2, debug):
        lines = cv2.HoughLinesP(self.edge, 1, np.pi / 180, 70, minLineLength = 10, maxLineGap = 200)
        lines = filter_lines(lines, self.vanishing_height, self.edge.shape[0], slope_sign)
        self.lines.extend(lines)
        mask = np.zeros(self.edge.shape, np.uint8)
        for line in lines:
            x1,y1,x2,y2 = line
            cv2.line(mask, (x1,y1),(x2,y2), 255, MASK_WIDTH)
        mask = cv2.addWeighted(mask, MASK_WEIGHT, previous_mask, 1 - MASK_WEIGHT, 0)
        #self.current_mask *= int(255.0 / self.current_mask.max())
        previous_mask = mask.copy()
        _, mask = cv2.threshold(mask, 40, 255, cv2.THRESH_BINARY)
        masked_edges = cv2.morphologyEx(cv2.bitwise_and(self.edge, self.edge, mask = mask), cv2.MORPH_CLOSE, np.array([[1] * EDGE_DILATION] *EDGE_DILATION))
        lines2 = cv2.HoughLinesP(masked_edges, 1, np.pi / 180, 70, minLineLength = 10, maxLineGap = 200)
        lines2 = filter_lines(lines2, self.vanishing_height, self.edge.shape[0], slope_sign)
        self.lines2.extend(lines2)
        for line in lines2:
            x1,y1,x2,y2 = line
            cv2.line(mask, (x1,y1),(x2,y2), 255, MASK_WIDTH)
            previous_line[0] = add(previous_line[0], (x2,y2))
            previous_line[1] = add(previous_line[1], (x_at_y(self.edge.shape[0]*0.6, x1, y1, x2, y2), self.edge.shape[0]*0.6))
        previous_line[0] = scale(previous_line[0], 1.0 / (len(lines2) + 1))
        previous_line[1] = scale(previous_line[1], 1.0 / (len(lines2) + 1))
        return masked_edges, mask, previous_mask, previous_line
Project: reconstruction    Author: microelly2
def execute_Morphing(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    ks=obj.kernel
    kernel = np.ones((ks,ks),np.uint8)
    if obj.filter == 'dilation':
        dilation = cv2.dilate(img,kernel,iterations = 1)
        img=dilation
    if obj.filter == 'erosion':
        dilation = cv2.erode(img,kernel,iterations = 1)
        img=dilation
    if obj.filter == 'opening':
        dilation = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img=dilation
    if obj.filter == 'closing':
        dilation = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        img=dilation

    obj.Proxy.img = img



#
# property functions for HoughLines
#
Project: piwall-cvtools    Author: infinnovation
def denoise_foreground(img, fgmask):
    img_bw = 255*(fgmask > 5).astype('uint8')
    se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
    mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
    mask = np.dstack([mask, mask, mask]) / 255
    img_dn = img * mask
    return img_dn
Project: cv-lane    Author: kendricktan
def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours
Project: cvloop    Author: shoeffner
def __call__(self, image):
        """Returns a foreground mask of the image."""
        return cv2.morphologyEx(self.fgbg.apply(image), cv2.MORPH_OPEN,
                                self.strel)
Project: cervix-roi-segmentation-by-unet    Author: scottykwok
def cv2_morph_close(binary_image, size=5):
    import cv2
    from skimage.morphology import disk
    kernel = disk(size)
    result = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
    return result
Project: cervix-roi-segmentation-by-unet    Author: scottykwok
def cv2_morph_open(binary_image, size=5):
    import cv2
    from skimage.morphology import disk
    kernel = disk(size)
    result = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
    return result
Project: Stereo-Pose-Machines    Author: ppwwyyxx
def segment(self, im):
        mask = np.square(im.astype('float32') - self.bgim
                ).sum(axis=2) / 20
        mask = np.clip(mask, 0, 255).astype('uint8')
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
        mask = cv2.dilate(mask, self.dilate_k)
        mask = mask.astype('uint8')
        return (mask > 10).astype('float32') *255
Project: pycolor_detection    Author: parth1993
def closing(mask):
    assert isinstance(mask, numpy.ndarray), 'mask must be a numpy array'
    assert mask.ndim == 2, 'mask must be a greyscale image'
    logger.debug("closing mask of shape {0}".format(mask.shape))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    return mask
Project: BlurDetection    Author: whdcumt
def morphology(msk):
    assert isinstance(msk, numpy.ndarray), 'msk must be a numpy array'
    assert msk.ndim == 2, 'msk must be a greyscale image'
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    msk = cv2.erode(msk, kernel, iterations=1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    msk = cv2.morphologyEx(msk, cv2.MORPH_CLOSE, kernel)
    msk[msk < 128] = 0
    msk[msk > 127] = 255
    return msk
Project: doc2text    Author: jlsutherland
def reduce_noise_edges(im):
    structuring_element = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, structuring_element)
    maxed_rows = rank_filter(opening, -4, size=(1, 20))
    maxed_cols = rank_filter(opening, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(opening, maxed_rows), maxed_cols)
    return debordered
Project: retinal-exudates-detection    Author: getsanjeev
def extract_bv(image):          
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # removing blobs of microaneurysm & unwanted bigger chunks taking in consideration they are not straight lines like blood
    # vessels and also in an interval of area
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: retinal-exudates-detection    Author: getsanjeev
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # removing blobs of microaneurysm & unwanted bigger chunks taking in consideration they are not straight lines like blood
    # vessels and also in an interval of area
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: gaps    Author: nemanja-m
def _filter_image(self, image):
        _, thresh = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)
        # morphologyEx expects a structuring element (ndarray), not a size tuple
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

        return cv2.bitwise_not(opened)
Project: 2017-robot    Author: frc1418
def threshold(self, img):
        cv2.cvtColor(img, cv2.COLOR_BGR2HSV, dst=self.hsv)
        cv2.inRange(self.hsv, self.thresh_low, self.thresh_high, dst=self.bin)

        cv2.morphologyEx(self.bin, cv2.MORPH_CLOSE, self.morphKernel, dst=self.bin2, iterations=1)

        if self.draw_thresh:
            b = (self.bin2 != 0)
            cv2.copyMakeBorder(self.black, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=self.RED, dst=self.out)
            self.out[np.dstack((b, b, b))] = 255

        return self.bin2
Project: unet-tensorflow    Author: timctho
def CloseInContour( mask, element ):
    large = 0
    result = mask
    _, contours, _ = cv2.findContours(result,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #find the biggest area
    c = max(contours, key = cv2.contourArea)

    closing = cv2.morphologyEx(result, cv2.MORPH_CLOSE, element)
    for x in range(mask.shape[0]):
        for y in range(mask.shape[1]):
             pt = cv2.pointPolygonTest(c, (x, y), True)
             #pt = cv2.pointPolygonTest(c, (x, y), False)
             if pt > 3:
                result[x][y] = closing[x][y]
    return result.astype(np.float32)
Project: unet-tensorflow    Author: timctho
def CloseInContour( mask, element ):
        large = 0
        result = mask
        _, contours, _ = cv2.findContours(result,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        #find the biggest area
        c = max(contours, key = cv2.contourArea)

        closing = cv2.morphologyEx(result, cv2.MORPH_CLOSE, element)
        for x in range(mask.shape[0]):
            for y in range(mask.shape[1]):
                 #pt = cv2.pointPolygonTest(c, (x, y), True)
                 pt = cv2.pointPolygonTest(c, (x, y), False)
                 if pt == 1:
                    result[x][y] = closing[x][y]
        return result.astype(np.float32)
Project: ghetto_omr    Author: pohzhiee
def outlining(img):
    #kernel size
    kernel_size=3
    #-------------------------------------------------
    #bilateral filter, sharpen, thresh image
    biblur=cv2.bilateralFilter(img,20,175,175)
    sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
    ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)

    #negative and closed image
    inv=cv2.bitwise_not(thresh1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
    return closed
Project: dust_repos    Author: taozhijiang
def img_contour_extra(im):
    # morphological closing to merge nearby foreground regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find the external contours of the connected regions
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: dust_repos    Author: taozhijiang
def img_contour_extra(im):
    # morphological closing to merge nearby foreground regions
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(13,7))
    bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)

    img_show_hook("closed mask", bgmask)

    # find the external contours of the connected regions
    im2, contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    return contours
Project: headlights    Author: Team395
def maskImg(image):
    #Convert image from RGB (red, green, blue) to HSV (hue, saturation, value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    #Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    #Removes white noise using an open transformation
    kernel = np.ones((4,4), np.uint8)
    #maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage
Project: headlights    Author: Team395
def maskImg(image):
    #Convert image from RGB (red, green, blue) to HSV (hue, saturation, value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    #Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    #Removes white noise using an open transformation
    kernel = np.ones((4,4), np.uint8)
    #maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage

#Find and return two matching rectangular contours if they exist, otherwise return none.
Project: opencv-helpers    Author: abarrak
def remove_noise(image, kernel=(2, 2)):
  ''' removes noisy pixels in the area. '''
  # build a proper structuring element; morphologyEx expects an ndarray kernel, not a size tuple
  return cv.morphologyEx(image, cv.MORPH_OPEN, cv.getStructuringElement(cv.MORPH_RECT, kernel))
Project: opencv-helpers    Author: abarrak
def fill(image, kernel=(2, 2)):
  ''' fill gaps in shapes structure. '''
  # build a proper structuring element; morphologyEx expects an ndarray kernel, not a size tuple
  return cv.morphologyEx(image, cv.MORPH_CLOSE, cv.getStructuringElement(cv.MORPH_RECT, kernel))
Project: UAV-and-TrueOrtho    Author: LeonChen66
def closing(img, kernel_size):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    return closing
Project: UAV-and-TrueOrtho    Author: LeonChen66
def opening(img, kernel_size):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    return opening
Project: UAV-and-TrueOrtho    Author: LeonChen66
def closing(img,kernel_size):
    kernel = np.ones((kernel_size,kernel_size),np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    return closing
Project: UAV-and-TrueOrtho    Author: LeonChen66
def opening(img,kernel_size):
    kernel = np.ones((kernel_size,kernel_size),np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    return opening
Project: pynephoscope    Author: neXyon
def detect(self, image, mask = None):
        floatimage = np.float32(image)

        fb,fg,fr = cv2.split(floatimage)

        # red-to-blue channel operation
        ra = fr + fb
        rb = fr - fb
        rb[ra > 0] /= ra[ra > 0]
        #mi = np.min(rb)
        #ma = np.max(rb)
        #rb = np.uint8((rb - mi) / (ma - mi) * 255)

        # morphology open
        if self.kernel is None or self.kernel.shape[0] != Configuration.background_rect_size:
            self.kernel = np.ones((Configuration.background_rect_size, Configuration.background_rect_size), np.uint8) * 255

        result = cv2.morphologyEx(rb, cv2.MORPH_OPEN, self.kernel)

        # background subtraction
        # homogeneous background image V
        result = rb - result

        mi = np.min(result)
        ma = np.max(result)
        result = np.uint8((result - mi) / (ma - mi) * 255)

        # adaptive threshold T
        T, _ = cv2.threshold(result[mask == 0], 0, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # V(i, j) > T
        return np.uint8((T - np.float32(result)) <= 0)
Project: pynephoscope    Author: neXyon
def close_result(self, result):
        return cv2.morphologyEx(result, cv2.MORPH_CLOSE, self.kernel)
Project: DoNotSnap    Author: AVGInnovationLabs
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))

    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)

    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)

    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)

    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)

    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))

    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)

    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)