Python cv2 module: MORPH_OPEN usage examples

The following 38 code examples, extracted from open-source Python projects, illustrate how to use cv2.MORPH_OPEN.
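
Before the project examples, here is a minimal, self-contained sketch of a typical opening call. It is illustrative only: the file names noisy_mask.png and opened_mask.png and the 5x5 elliptical kernel are assumptions, not code taken from any of the projects below.

import cv2

# Load a (hypothetical) noisy binary mask as a single-channel 8-bit image.
mask = cv2.imread('noisy_mask.png', cv2.IMREAD_GRAYSCALE)

# Opening = erosion followed by dilation: it removes small bright specks
# while roughly preserving the shape of larger foreground regions.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

cv2.imwrite('opened_mask.png', opened)

Most of the examples below follow this pattern, varying the kernel shape and size and combining the opening with closing, blurring, background subtraction, or connected-component analysis.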

Project: dvd    Author: ajayrfhp    | Project source | File source
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            if output[2][i][4] >= min_thresh and output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2], output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        if cv2.waitKey(30) & 0xFF == ord('q'):  # let the window refresh; press 'q' to stop early
            break
    cap.release()
    cv2.destroyAllWindows()
Project: dream2016_dm    Author: lishen    | Project source | File source
def select_largest_obj(self, img_bin, lab_val=255, fill_holes=False, 
                           smooth_boundary=False, kernel_size=15):
        '''Select the largest object from a binary image and optionally
        fill holes inside it and smooth its boundary.
        Args:
            img_bin (2D array): 2D numpy array of binary image.
            lab_val ([int]): integer value used for the label of the largest 
                    object. Default is 255.
            fill_holes ([boolean]): whether to fill the holes inside the largest
                    object. Default is False.
            smooth_boundary ([boolean]): whether to smooth the boundary of the
                    largest object using morphological opening. Default is False.
            kernel_size ([int]): the size of the kernel used for morphological 
                    operation. Default is 15.
        Returns:
            a binary image as a mask for the largest object.
        '''
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(img_bin, connectivity=8, 
                                             ltype=cv2.CV_32S)
        largest_obj_lab = np.argmax(lab_stats[1:, 4]) + 1
        largest_mask = np.zeros(img_bin.shape, dtype=np.uint8)
        largest_mask[img_labeled == largest_obj_lab] = lab_val
        # import pdb; pdb.set_trace()
        if fill_holes:
            bkg_locs = np.where(img_labeled == 0)
            bkg_seed = (bkg_locs[0][0], bkg_locs[1][0])
            img_floodfill = largest_mask.copy()
            h_, w_ = largest_mask.shape
            mask_ = np.zeros((h_ + 2, w_ + 2), dtype=np.uint8)
            cv2.floodFill(img_floodfill, mask_, seedPoint=bkg_seed, 
                          newVal=lab_val)
            holes_mask = cv2.bitwise_not(img_floodfill)  # mask of the holes.
            largest_mask = largest_mask + holes_mask
        if smooth_boundary:
            kernel_ = np.ones((kernel_size, kernel_size), dtype=np.uint8)
            largest_mask = cv2.morphologyEx(largest_mask, cv2.MORPH_OPEN, 
                                            kernel_)

        return largest_mask
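
A hedged usage sketch for the method above; the instance name preproc, the variable img, and the Otsu thresholding step are assumptions, not part of the project code.

import cv2

# `preproc` is assumed to be an instance of the class defining select_largest_obj,
# and `img` an already-loaded 8-bit grayscale image.
_, img_bin = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
breast_mask = preproc.select_largest_obj(img_bin, lab_val=255, fill_holes=True,
                                         smooth_boundary=True, kernel_size=15)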
Project: Speedy-TSLSR    Author: talhaHavadar    | Project source | File source
def __filterRedColor(image_hsv):
    """
        Filters the red color from image_hsv and returns mask.
    """
    mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
    mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
    mask = mask1 + mask2
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    mask = cv2.Canny(mask, 50, 100)
    mask = cv2.GaussianBlur(mask, (13, 13), 0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    return mask
Project: CE264-Computer_Vision    Author: RobinCPC    | Project source | File source
def skin_detect(self, raw_yrb, img_src):
        # use median blurring to remove signal noise in YCRCB domain
        raw_yrb = cv2.medianBlur(raw_yrb, 5)
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)

        # morphological transform to remove unwanted part
        kernel = np.ones((5, 5), np.uint8)
        #mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
        mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)

        res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
        #res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7,21)

        return res_skin


# Do background subtraction with some filtering
Project: reconstruction    Author: microelly2    | Project source | File source
def animpingpong(self):
        obj=self.Object
        img=None
        if not obj.imageFromNode:
            img = cv2.imread(obj.imageFile)
        else:
            print "copy image ..."
            img = obj.imageNode.ViewObject.Proxy.img.copy()
            print "cpied"

        print " loaded"

        # print (obj.blockSize,obj.ksize,obj.k)
#       edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#       color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#       edges=color
#

        kernel = np.ones((obj.xsize,obj.ysize),np.uint8)

        opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


        if True:
            print "showing image"
            cv2.imshow(obj.Label,opening)
            print "image shown"
        else:
            from matplotlib import pyplot as plt
            plt.subplot(121),plt.imshow(img,cmap = 'gray')
            plt.title('Input Image'), plt.xticks([]), plt.yticks([])
            plt.subplot(122),plt.imshow(opening,cmap = 'gray')
            plt.title('Opened Image'), plt.xticks([]), plt.yticks([])
            plt.show()
        print "done"
        self.img=opening
Project: PyFRAP    Author: alexblaessle    | Project source | File source
def getContours(img,kernel=(10,10)):

    #Define kernel
    kernel = np.ones(kernel, np.uint8)

    #Open to erode small patches
    thresh = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)

    #Close little holes
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,kernel, iterations=4)

    #Find contours
    #contours=skimsr.find_contours(thresh,0)

    thresh=thresh.astype('uint8')
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)

    areas=[]
    for c in contours:
        areas.append(cv2.contourArea(c))

    return contours,thresh,areas
Project: image_text_reader    Author: yardstick17    | Project source | File source
def remove_noise_and_smooth(file_name):
    logging.info('Removing noise and smoothening image')
    img = cv2.imread(file_name, 0)
    filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41, 3)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    img = image_smoothening(img)
    or_image = cv2.bitwise_or(img, closing)
    return or_image
Project: opentrack-prototyping    Author: DaMichel    | Project source | File source
def simple_feature_size_filter(img, minradius, maxradius):
    feature_radius_min = minradius | 1  # play with these to see how they affect highlighting of structures of various sizes
    feature_radius_max = maxradius | 1

    if 0:
        w = feature_radius_min*2 | 1
        blurred = cv2.GaussianBlur(img, (w, w), feature_radius_min)

        w = feature_radius_max*2 | 1
        veryblurred = cv2.GaussianBlur(img, (w, w), feature_radius_max)
    else:
        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_min, feature_radius_min))
        blurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

        s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (feature_radius_max, feature_radius_max))
        veryblurred = cv2.morphologyEx(img, cv2.MORPH_OPEN, s)

    bandfiltered = blurred - np.minimum(veryblurred, blurred)
    return bandfiltered
Project: TableSoccerCV    Author: StudentCV    | Project source | File source
def _smooth_ball_mask(self, mask):
        """
        The mask created in DetectBallPosition might be noisy.
        :param mask: The mask to smooth (Image with bit depth 1)
        :return: The smoothed mask
        """
        # create the disk-shaped kernel for the following image processing,
        r = 3
        kernel = np.ones((2*r, 2*r), np.uint8)
        for x in range(0, 2*r):
            for y in range(0, 2*r):
                if(x - r + 0.5)**2 + (y - r + 0.5)**2 > r**2:
                    kernel[x, y] = 0

        # remove noise
        # see http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        return mask
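
As an aside (not part of the project's code), a roughly equivalent disk-shaped kernel can be built directly with OpenCV; the exact boundary pixels may differ slightly from the hand-rolled loop above.

import cv2

r = 3
# Elliptical structuring element of the same 2r x 2r size as the manual disk.
disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * r, 2 * r))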
Project: reconstruction    Author: microelly2    | Project source | File source
def execute_Morphing(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    ks=obj.kernel
    kernel = np.ones((ks,ks),np.uint8)
    if obj.filter == 'dilation':
        dilation = cv2.dilate(img,kernel,iterations = 1)
        img=dilation
    if obj.filter == 'erosion':
        dilation = cv2.erode(img,kernel,iterations = 1)
        img=dilation
    if obj.filter == 'opening':
        dilation = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img=dilation
    if obj.filter == 'closing':
        dilation = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        img=dilation

    obj.Proxy.img = img



#
# property functions for HoughLines
#
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def denoise_foreground(img, fgmask):
    img_bw = 255*(fgmask > 5).astype('uint8')
    se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
    mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
    mask = np.dstack([mask, mask, mask]) / 255
    img_dn = img * mask
    return img_dn
Project: cv-lane    Author: kendricktan    | Project source | File source
def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours
Project: cvloop    Author: shoeffner    | Project source | File source
def __call__(self, image):
        """Returns a foreground mask of the image."""
        return cv2.morphologyEx(self.fgbg.apply(image), cv2.MORPH_OPEN,
                                self.strel)
Project: cervix-roi-segmentation-by-unet    Author: scottykwok    | Project source | File source
def cv2_morph_open(binary_image, size=5):
    import cv2
    from skimage.morphology import disk
    kernel = disk(size)
    result = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
    return result
Project: Stereo-Pose-Machines    Author: ppwwyyxx    | Project source | File source
def segment(self, im):
        mask = np.square(im.astype('float32') - self.bgim
                ).sum(axis=2) / 20
        mask = np.clip(mask, 0, 255).astype('uint8')
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
        mask = cv2.dilate(mask, self.dilate_k)
        mask = mask.astype('uint8')
        return (mask > 10).astype('float32') *255
Project: pycolor_detection    Author: parth1993    | Project source | File source
def closing(mask):
    assert isinstance(mask, numpy.ndarray), 'mask must be a numpy array'
    assert mask.ndim == 2, 'mask must be a greyscale image'
    logger.debug("closing mask of shape {0}".format(mask.shape))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    return mask
Project: doc2text    Author: jlsutherland    | Project source | File source
def reduce_noise_edges(im):
    structuring_element = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, structuring_element)
    maxed_rows = rank_filter(opening, -4, size=(1, 20))
    maxed_cols = rank_filter(opening, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(opening, maxed_rows), maxed_cols)
    return debordered
Project: retinal-exudates-detection    Author: getsanjeev    | Project source | File source
def extract_bv(image):          
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternating sequential filtering (3 rounds of opening followed by closing, at increasing kernel sizes)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # removing microaneurysm blobs & unwanted bigger chunks, considering that they are not straight lines like blood
    # vessels and that their areas fall within a given interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: retinal-exudates-detection    Author: getsanjeev    | Project source | File source
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrast_enhanced_green_fundus = clahe.apply(image)
    # applying alternating sequential filtering (3 rounds of opening followed by closing, at increasing kernel sizes)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)), iterations = 1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11)), iterations = 1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(23,23)), iterations = 1)
    f4 = cv2.subtract(R3,contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret,f6 = cv2.threshold(f5,15,255,cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)            
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret,fin = cv2.threshold(im,15,255,cv2.THRESH_BINARY_INV)            
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)   

    # removing microaneurysm blobs & unwanted bigger chunks, considering that they are not straight lines like blood
    # vessels and that their areas fall within a given interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)    
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"    
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)   

    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)  
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
Project: gaps    Author: nemanja-m    | Project source | File source
def _filter_image(self, image):
        _, thresh = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # explicit 5x5 structuring element; a bare (5, 5) tuple would not act as a 5x5 kernel
        opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

        return cv2.bitwise_not(opened)
Project: headlights    Author: Team395    | Project source | File source
def maskImg(image):
    #Convert image from RGB (red, green, blue) to HSV (hue, saturation, value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    #Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    #Removes white noise using an open transformation
    kernel = np.ones((4,4), np.uint8)
    #maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage
Project: headlights    Author: Team395    | Project source | File source
def maskImg(image):
    #Convert image from RGB (red, green, blue) to HSV (hue, saturation, value)
    maskedImage = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    #Convert image to binary using the predefined color arrays
    maskedImage = cv2.inRange(maskedImage, lowColor, highColor)
    #Removes white noise using an open transformation
    kernel = np.ones((4,4), np.uint8)
    #maskedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
    return maskedImage

#Find and return two matching rectangular contours if they exist, otherwise return none.
Project: opencv-helpers    Author: abarrak    | Project source | File source
def remove_noise(image, kernel=(2, 2)):
  ''' Removes noisy pixels in the area via a morphological opening. '''
  # build a rectangular structuring element of the requested size (a bare tuple is not a 2-D kernel)
  struct_elem = cv.getStructuringElement(cv.MORPH_RECT, kernel)
  return cv.morphologyEx(image, cv.MORPH_OPEN, struct_elem)
Project: UAV-and-TrueOrtho    Author: LeonChen66    | Project source | File source
def opening(img, kernel_size):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    return opening
Project: UAV-and-TrueOrtho    Author: LeonChen66    | Project source | File source
def opening(img,kernel_size):
    kernel = np.ones((kernel_size,kernel_size),np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    return opening
Project: pynephoscope    Author: neXyon    | Project source | File source
def detect(self, image, mask = None):
        floatimage = np.float32(image)

        fb,fg,fr = cv2.split(floatimage)

        # red-to-blue channel operation
        ra = fr + fb
        rb = fr - fb
        rb[ra > 0] /= ra[ra > 0]
        #mi = np.min(rb)
        #ma = np.max(rb)
        #rb = np.uint8((rb - mi) / (ma - mi) * 255)

        # morphology open
        if self.kernel is None or self.kernel.shape[0] != Configuration.background_rect_size:
            self.kernel = np.ones((Configuration.background_rect_size, Configuration.background_rect_size), np.uint8) * 255

        result = cv2.morphologyEx(rb, cv2.MORPH_OPEN, self.kernel)

        # background subtraction
        # homogeneous background image V
        result = rb - result

        mi = np.min(result)
        ma = np.max(result)
        result = np.uint8((result - mi) / (ma - mi) * 255)

        # adaptive threshold T
        T, _ = cv2.threshold(result[mask == 0], 0, 1, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # V(i, j) > T
        return np.uint8((T - np.float32(result)) <= 0)
Project: DoNotSnap    Author: AVGInnovationLabs    | Project source | File source
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))

    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)

    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)

    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)

    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)

    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))

    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)

    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)
Project: cancer_nn    Author: tanmoyopenroot    | Project source | File source
def getOpeningImage(img):
    kernel = np.ones((35,35),np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    return opening
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def check_if_good_boundary(self, boundary, norm_height, norm_width, color_img):
        preprocess_bg_mask = PreprocessBackgroundMask(boundary)
        char_w = norm_width / 20
        remove_noise = PreprocessRemoveNonCharNoise(char_w)

        id_card_img_mask = preprocess_bg_mask.do(color_img)
        id_card_img_mask[0:int(norm_height*0.05),:] = 0
        id_card_img_mask[int(norm_height*0.95): ,:] = 0
        id_card_img_mask[:, 0:int(norm_width*0.05)] = 0
        id_card_img_mask[:, int(norm_width*0.95):] = 0

        remove_noise.do(id_card_img_mask)

#        se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
#        se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
#        mask = cv2.morphologyEx(id_card_img_mask, cv2.MORPH_CLOSE, se1)
#        id_card_img_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
#  
        ## remove right head profile
        left_half_id_card_img_mask = np.copy(id_card_img_mask)
        left_half_id_card_img_mask[:, norm_width/2:] = 0

        ## Try to find text lines and chars
        horizontal_sum = np.sum(left_half_id_card_img_mask, axis=1)
        line_ranges = extract_peek_ranges_from_array(horizontal_sum)

        return len(line_ranges) >= 5 and len(line_ranges) <= 7
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def opening_image(img, kernelSize=(4,4)):
    """
    Performs an image opening operation.

    Keyword arguments:
    kernelSize -- Size of the kernel to open the image. -- Default: (4,4)
    """

    kernel = np.ones(kernelSize, np.uint8)
    return cv.morphologyEx(img, cv.MORPH_OPEN, kernel)
Project: Fingers-Detection-using-OpenCV-and-Python    Author: lzane    | Project source | File source
def removeBG(frame):
    fgmask = bgModel.apply(frame)
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=fgmask)
    return res
Project: R-CNN_LIGHT    Author: YeongHyeon    | Project source | File source
def opening(binary_img=None, k_size=2, iterations=1):

    kernel = np.ones((k_size, k_size), np.uint8)

    return cv2.morphologyEx(binary_img, cv2.MORPH_OPEN, kernel, iterations=iterations) # iteration = loop
Project: GoogleVideo    Author: bw4sz    | Project source | File source
def background_apply(self):

        #Apply Subtraction
        #self.image = self.fgbg.apply(self.original_image,learningRate=self.args.moglearning)
        self.image = self.fgbg.apply(self.original_image)

        #Erode to remove noise, dilate the areas to merge bounded objects
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(15,15))
        self.image= cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel)
Project: SharkCV    Author: hammerhead226    | Project source | File source
def open(self, **kwargs):
        if 'shape' not in kwargs:
            kwargs['shape'] = cv2.MORPH_ELLIPSE
        if 'size' not in kwargs:
            kwargs['size'] = 3
        if kwargs['size'] > 0:
            kernel = cv2.getStructuringElement(kwargs['shape'], (kwargs['size'], kwargs['size']))
            self._ndarray = cv2.morphologyEx(self.ndarray, cv2.MORPH_OPEN, kernel)
            self._contours = None

    # Dilate/erode this mask's white area
Project: reconstruction    Author: microelly2    | Project source | File source
def drawErosion(circles):
    ''' reduce the dataset of intersection points '''

    x=[]
    y=[]
    for c in circles:
        x.append(int(round(c[0])))
        y.append(int(round(c[1])))
    x=np.array(x)
    y=np.array(y)

    f=20
    bbox=[int(round(np.min(x)/f)),int(round(np.min(y)/f)),int(round(np.max(x)/f)),int(round(np.max(y)/f))]
    h=int(round(bbox[3]-bbox[1]))
    w=int(round(bbox[2]-bbox[0]))

    img = np.zeros((2*h,2*w,3), np.uint8)

    for c in circles:
        # draw the outer circle
        u,v=int(round(c[0]/f))-bbox[0],bbox[3]-int(round(c[1]/f))
        q=cv2.circle(img,(100+u,100+v),3,(0,255,0),3)

    kernel = np.ones((3,3),np.uint8)
    erosion = cv2.erode(img,kernel,iterations = 4)
    result=erosion

    #opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    #result=opening

    yy=cv2.circle(img,(1500,500),50,(0,0,255),3)
    yy=cv2.imshow("erosion ",result)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()


    circles2=[]
    for ix in range(result.shape[0]):
        for iy in range(result.shape[1]):
            if max(erosion[ix][iy])> 0:
                circles2.append([iy,-ix])

    return circles2
Project: dream2016_dm    Author: lishen    | Project source | File source
def remove_pectoral(self, img, breast_mask, high_int_threshold=.8, 
                        morph_kn_size=3, n_morph_op=7, sm_kn_size=25):
        '''Remove the pectoral muscle region from an input image

        Args:
            img (2D array): input image as a numpy 2D array.
            breast_mask (2D array):
            high_int_threshold ([float]): a global threshold for high intensity
                    regions such as the pectoral muscle; values below 1 are
                    interpreted as a fraction of the image maximum. Default is .8.
            morph_kn_size ([int]): kernel size for morphological operations 
                    such as erosions and dilations. Default is 3.
            n_morph_op ([int]): number of morphological operations. Default is 7.
            sm_kn_size ([int]): kernel size for final smoothing (i.e. opening). 
                    Default is 25.
        Returns:
            an output image with pectoral muscle region removed as a numpy 
            2D array.
        Notes: this has not been tested on .dcm files yet. It may not work!!!
        '''
        # Enhance contrast and then thresholding.
        img_equ = cv2.equalizeHist(img)
        if high_int_threshold < 1.:
            high_th = int(img.max()*high_int_threshold)
        else:
            high_th = int(high_int_threshold)
        maxval = self.max_pix_val(img.dtype)
        _, img_bin = cv2.threshold(img_equ, high_th, 
                                   maxval=maxval, type=cv2.THRESH_BINARY)
        pect_marker_img = np.zeros(img_bin.shape, dtype=np.int32)
        # Sure foreground (shall be pectoral).
        pect_mask_init = self.select_largest_obj(img_bin, lab_val=maxval, 
                                                 fill_holes=True, 
                                                 smooth_boundary=False)
        kernel_ = np.ones((morph_kn_size, morph_kn_size), dtype=np.uint8)
        pect_mask_eroded = cv2.erode(pect_mask_init, kernel_, 
                                     iterations=n_morph_op)
        pect_marker_img[pect_mask_eroded > 0] = 255
        # Sure background - breast.
        pect_mask_dilated = cv2.dilate(pect_mask_init, kernel_, 
                                       iterations=n_morph_op)
        pect_marker_img[pect_mask_dilated == 0] = 128
        # Sure background - pure background.
        pect_marker_img[breast_mask == 0] = 64
        # Watershed segmentation.
        img_equ_3c = cv2.cvtColor(img_equ, cv2.COLOR_GRAY2BGR)
        cv2.watershed(img_equ_3c, pect_marker_img)
        img_equ_3c[pect_marker_img == -1] = (0, 0, 255)
        # Extract only the breast and smooth.
        breast_only_mask = pect_marker_img.copy()
        breast_only_mask[breast_only_mask == -1] = 0
        breast_only_mask = breast_only_mask.astype(np.uint8)
        breast_only_mask[breast_only_mask != 128] = 0
        breast_only_mask[breast_only_mask == 128] = 255
        kernel_ = np.ones((sm_kn_size, sm_kn_size), dtype=np.uint8)
        breast_only_mask = cv2.morphologyEx(breast_only_mask, cv2.MORPH_OPEN, 
                                            kernel_)
        img_breast_only = cv2.bitwise_and(img_equ, breast_only_mask)

        return (img_breast_only, img_equ_3c)
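
Continuing the hedged sketch given after select_largest_obj above; preproc, img, and breast_mask are the same assumed names and are not part of the project code.

# Remove the pectoral muscle using the breast mask obtained earlier.
img_breast_only, img_annotated = preproc.remove_pectoral(img, breast_mask,
                                                         high_int_threshold=.8,
                                                         morph_kn_size=3,
                                                         n_morph_op=7,
                                                         sm_kn_size=25)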
Project: FingerNet    Author: felixTY    | Project source | File source
def deploy(deploy_set, set_name=None):
    if set_name is None:
        set_name = deploy_set.split('/')[-2]
    mkdir(output_dir+'/'+set_name+'/')
    logging.info("Predicting %s:"%(set_name)) 
    _, img_name = get_files_in_folder(deploy_set, '.bmp')
    if len(img_name) == 0:
        deploy_set = deploy_set+'images/'
        _, img_name = get_files_in_folder(deploy_set, '.bmp')
    img_size = misc.imread(deploy_set+img_name[0]+'.bmp', mode='L').shape
    img_size = np.array(img_size, dtype=np.int32)/8*8      
    main_net_model = get_main_net((img_size[0],img_size[1],1), pretrain)
    _, img_name = get_files_in_folder(deploy_set, '.bmp')
    time_c = []
    for i in xrange(0,len(img_name)):
        logging.info("%s %d / %d: %s"%(set_name, i+1, len(img_name), img_name[i]))
        time_start = time()    
        image = misc.imread(deploy_set+img_name[i]+'.bmp', mode='L') / 255.0
        image = image[:img_size[0],:img_size[1]]      
        image = np.reshape(image,[1, image.shape[0], image.shape[1], 1])
        enhance_img, ori_out_1, ori_out_2, seg_out, mnt_o_out, mnt_w_out, mnt_h_out, mnt_s_out = main_net_model.predict(image) 
        time_afterconv = time()
        round_seg = np.round(np.squeeze(seg_out))
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5))
        seg_out = cv2.morphologyEx(round_seg, cv2.MORPH_OPEN, kernel)
        mnt = label2mnt(np.squeeze(mnt_s_out)*np.round(np.squeeze(seg_out)), mnt_w_out, mnt_h_out, mnt_o_out, thresh=0.5)
        mnt_nms = nms(mnt)
        ori = sess.run(ori_highest_peak(ori_out_1))                           
        ori = (np.argmax(ori, axis=-1)*2-90)/180.*np.pi  
        time_afterpost = time()
        mnt_writer(mnt_nms, img_name[i], img_size, "%s/%s/%s.mnt"%(output_dir, set_name, img_name[i]))        
        draw_ori_on_img(image, ori, np.ones_like(seg_out), "%s/%s/%s_ori.png"%(output_dir, set_name, img_name[i]))        
        draw_minutiae(image, mnt_nms[:,:3], "%s/%s/%s_mnt.png"%(output_dir, set_name, img_name[i]))
        misc.imsave("%s/%s/%s_enh.png"%(output_dir, set_name, img_name[i]), np.squeeze(enhance_img)*ndimage.zoom(np.round(np.squeeze(seg_out)), [8,8], order=0))
        misc.imsave("%s/%s/%s_seg.png"%(output_dir, set_name, img_name[i]), ndimage.zoom(np.round(np.squeeze(seg_out)), [8,8], order=0)) 
        io.savemat("%s/%s/%s.mat"%(output_dir, set_name, img_name[i]), {'orientation':ori, 'orientation_distribution_map':ori_out_1})
        time_afterdraw = time()
        time_c.append([time_afterconv-time_start, time_afterpost-time_afterconv, time_afterdraw-time_afterpost])
        logging.info("load+conv: %.3fs, seg-postpro+nms: %.3f, draw: %.3f"%(time_c[-1][0],time_c[-1][1],time_c[-1][2]))
    time_c = np.mean(np.array(time_c),axis=0)
    logging.info("Average: load+conv: %.3fs, oir-select+seg-post+nms: %.3f, draw: %.3f"%(time_c[0],time_c[1],time_c[2]))
    return
Project: FingerNet    Author: felixTY    | Project source | File source
def deploy(deploy_set, set_name=None):
    if set_name is None:
        set_name = deploy_set.split('/')[-2]
    mkdir(output_dir+'/'+set_name+'/')
    logging.info("Predicting %s:"%(set_name)) 
    _, img_name = get_files_in_folder(deploy_set, '.bmp')
    if len(img_name) == 0:
        deploy_set = deploy_set+'images/'
        _, img_name = get_files_in_folder(deploy_set, '.bmp')
    img_size = misc.imread(deploy_set+img_name[0]+'.bmp', mode='L').shape
    img_size = np.array(img_size, dtype=np.int32)/8*8      
    main_net_model = get_main_net((img_size[0],img_size[1],1), pretrain)
    _, img_name = get_files_in_folder(deploy_set, '.bmp')
    time_c = []
    for i in xrange(0,len(img_name)):
        logging.info("%s %d / %d: %s"%(set_name, i+1, len(img_name), img_name[i]))
        time_start = time()    
        image = misc.imread(deploy_set+img_name[i]+'.bmp', mode='L') / 255.0
        image = image[:img_size[0],:img_size[1]]      
        image = np.reshape(image,[1, image.shape[0], image.shape[1], 1])
        enhance_img, ori_out_1, ori_out_2, seg_out, mnt_o_out, mnt_w_out, mnt_h_out, mnt_s_out = main_net_model.predict(image) 
        time_afterconv = time()
        round_seg = np.round(np.squeeze(seg_out))
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5))
        seg_out = cv2.morphologyEx(round_seg, cv2.MORPH_OPEN, kernel)
        mnt = label2mnt(np.squeeze(mnt_s_out)*np.round(np.squeeze(seg_out)), mnt_w_out, mnt_h_out, mnt_o_out, thresh=0.5)
        mnt_nms = nms(mnt)
        ori = sess.run(ori_highest_peak(ori_out_1))                           
        ori = (np.argmax(ori, axis=-1)*2-90)/180.*np.pi  
        time_afterpost = time()
        mnt_writer(mnt_nms, img_name[i], img_size, "%s/%s/%s.mnt"%(output_dir, set_name, img_name[i]))        
        draw_ori_on_img(image, ori, np.ones_like(seg_out), "%s/%s/%s_ori.png"%(output_dir, set_name, img_name[i]))        
        draw_minutiae(image, mnt_nms[:,:3], "%s/%s/%s_mnt.png"%(output_dir, set_name, img_name[i]))
        misc.imsave("%s/%s/%s_enh.png"%(output_dir, set_name, img_name[i]), np.squeeze(enhance_img)*ndimage.zoom(np.round(np.squeeze(seg_out)), [8,8], order=0))
        misc.imsave("%s/%s/%s_seg.png"%(output_dir, set_name, img_name[i]), ndimage.zoom(np.round(np.squeeze(seg_out)), [8,8], order=0)) 
        io.savemat("%s/%s/%s.mat"%(output_dir, set_name, img_name[i]), {'orientation':ori, 'orientation_distribution_map':ori_out_1})
        time_afterdraw = time()
        time_c.append([time_afterconv-time_start, time_afterpost-time_afterconv, time_afterdraw-time_afterpost])
        logging.info("load+conv: %.3fs, seg-postpro+nms: %.3f, draw: %.3f"%(time_c[-1][0],time_c[-1][1],time_c[-1][2]))
    time_c = np.mean(np.array(time_c),axis=0)
    logging.info("Average: load+conv: %.3fs, oir-select+seg-post+nms: %.3f, draw: %.3f"%(time_c[0],time_c[1],time_c[2]))
    return