Python cv2 module: Laplacian() example source code

The following 22 code examples, extracted from open-source Python projects, illustrate how to use cv2.Laplacian().
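Before the project examples, here is a minimal, hypothetical sketch of the most common call pattern (the file names below are illustrative assumptions, not taken from any of the projects): compute the Laplacian into a 64-bit float image so that negative edge responses are not clipped, then take the absolute value and convert back to uint8 for saving or display.

import cv2
import numpy as np

# Hypothetical input file -- replace with your own image.
img = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)

# CV_64F keeps negative Laplacian responses; convert back to uint8 afterwards.
lap = cv2.Laplacian(img, cv2.CV_64F)
lap_u8 = np.uint8(np.absolute(lap))
cv2.imwrite('laplacian.jpg', lap_u8)

Many of the examples below skip the uint8 conversion and instead call .var() on the CV_64F result, using the variance of the Laplacian as a sharpness score.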

Project: cvcalib    Author: Algomorph
def try_approximate_corners_blur(self, board_dims, sharpness_threshold):
        sharpness = cv2.Laplacian(self.frame, cv2.CV_64F).var()
        if sharpness < sharpness_threshold:
            return False
        found, corners = cv2.findChessboardCorners(self.frame, board_dims)
        self.current_image_points = corners
        return found
Project: bib-tagger    Author: KateRita
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  binary = cv2.GaussianBlur(gray,(5,5),0)
  ret,binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours)
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
Project: dataArtist    Author: radjkarl
def _filter(img, method, k):
        if method == 'Edge gradient':
            sy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=k)
            sx = cv2.Sobel(img, ddepth=cv2.CV_64F,dx=1, dy=0, ksize=k)
#             sx = sobel(img, axis=0, mode='constant')
#             sy = sobel(img, axis=1, mode='constant')
            return np.hypot(sx, sy)
        if method == 'Sobel-H':
            return cv2.Sobel(img, ddepth=cv2.CV_64F,dx=0, dy=1, ksize=k)
        #sobel(img, axis=0, mode='constant')
        if method == 'Sobel-V':
            return cv2.Sobel(img, ddepth=cv2.CV_64F,dx=1, dy=0, ksize=k)
        #sobel(img, axis=1, mode='constant')
        if method == 'Laplace':
            return cv2.Laplacian(img, ddepth=cv2.CV_64F,ksize=5)
        #laplace(img)
Project: Vision-based-parking-lot-availability-OpenCV    Author: Saar1312
def getEdges(gray,detector,min_thr=None,max_thr=None):
    """
        Where detector in {1,2,3,4,5}
        1: Laplacian
        2: Sobelx
        3: Sobely
        4: Canny
        5: Sobelx with positive and negative slope (in 2 the negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray,cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=-1)
    elif detector == 4:
        return cv2.Canny(gray,min_thr,max_thr)  # The thresholds apply to the intensity gradient (how
                                                # different a pixel is from its neighbors), not to the raw intensity
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
Project: pybot    Author: spillai
def variance_of_laplacian(im): 
    """
    Compute the Laplacian of the image and then return the focus
    measure, which is simply the variance of the Laplacian
    http://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    """
    return cv2.Laplacian(im, cv2.CV_64F).var()
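A hedged usage sketch for the helper above (not part of the pybot project): the file name and the threshold of 100.0 are illustrative assumptions; in practice the threshold has to be tuned per camera and scene.

import cv2

# Hypothetical usage -- 'photo.jpg' and 100.0 are placeholders.
im = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)
if variance_of_laplacian(im) < 100.0:
    print('image looks blurry')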
Project: pybot    Author: spillai
def variance_of_laplacian(image):
    return cv2.Laplacian(image, cv2.CV_64F).var()
Project: motorized_zoom_lens    Author: Kurokesu
def get_blur(frame, scale):
    frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fm = cv2.Laplacian(gray, cv2.CV_64F).var()
    return fm
Project: motorized_zoom_lens    Author: Kurokesu
def get_blur(frame, scale):
    frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fm = cv2.Laplacian(gray, cv2.CV_64F).var()
    return fm
Project: BlurDetection2    Author: WillBrennan
def estimate_blur(image, threshold=100):
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    blur_map = cv2.Laplacian(image, cv2.CV_64F)
    score = numpy.var(blur_map)
    return blur_map, score, bool(score < threshold)
Project: imgProcessor    Author: radjkarl
def varianceOfLaplacian(img):
    ''''LAPV' algorithm (Pech2000)'''
    lap = cv2.Laplacian(img, ddepth=-1)  # alternatively: ddepth=cv2.cv.CV_64F
    stdev = cv2.meanStdDev(lap)[1]
    s = stdev[0]**2
    return s[0]
Project: FindYourCandy    Author: BrainPad
def _blur_index(self, img):
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.Laplacian(img_gray, cv2.CV_64F).var()
Project: opencv-plgs    Author: Image-Py
def run(self, ips, snap, img, para = None):
        return cv2.Laplacian(img, -1)
Project: histonets-cv    Author: sul-cidr
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image applying mask to template using method.
    Method can be either of (None, 'laplacian', 'sobel', 'scharr', 'prewitt',
    'roberts', 'canny').
    Returns locations to look for max values."""
    if mask is not None:
        if method:
            kernel = np.ones((3, 3), np.uint8)
            mask = cv2.erode(mask, kernel)
            if method == 'laplacian':
                # use CV_64F so that edges are not lost; convert to uint8 afterwards
                edge_image = np.uint8(np.absolute(
                    cv2.Laplacian(image, cv2.CV_64F)))
                edge_template = np.uint8(np.absolute(
                    cv2.Laplacian(template, cv2.CV_64F)
                ))
            elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
                filter_func = getattr(skfilters, method)
                edge_image = filter_func(image)
                edge_template = filter_func(template)
                edge_image = convert(edge_image)
                edge_template = convert(edge_template)
            else:  # method == 'canny'
                values = np.hstack([image.ravel(), template.ravel()])
                median = np.median(values)
                lower = int(max(0, (1.0 - sigma) * median))
                upper = int(min(255, (1.0 + sigma) * median))
                edge_image = cv2.Canny(image, lower, upper)
                edge_template = cv2.Canny(template, lower, upper)
            results = cv2.matchTemplate(edge_image, edge_template & mask,
                                        cv2.TM_CCOEFF_NORMED)
        else:
            results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED,
                                        mask)
    else:
        results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return results
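A hedged usage sketch for match_template_mask (not part of the histonets-cv project): the image and template paths are illustrative assumptions, the template must be no larger than the image, and because the function returns a TM_CCOEFF_NORMED response map, the best match location can be read off with cv2.minMaxLoc.

import cv2

# Hypothetical usage -- 'scene.png' and 'patch.png' are placeholders.
image = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)
results = match_template_mask(image, template, mask=None, method=None)
_, max_val, _, max_loc = cv2.minMaxLoc(results)
print('best match at', max_loc, 'with score', max_val)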
Project: CodeLabs    Author: TheIoTLearningInitiative
def get_frame(self):
    ret, frame = self.cap.read()
    laplacian = cv2.Laplacian(frame,cv2.CV_64F)
    cv2.imwrite('image.jpg',np.hstack((frame,laplacian)))
    return open('image.jpg', 'rb').read()
Project: CodeLabs    Author: TheIoTLearningInitiative
def get_frame(self):
        ret, frame = self.cap.read()
        laplacian = cv2.Laplacian(frame,cv2.CV_64F)
        cv2.imwrite('imagewritten.jpg',np.hstack((frame,laplacian)))
        return open('imagewritten.jpg', 'rb').read()
Project: thesis_scripts    Author: PhilippKopp
def analyse_isomaps(self):
        print ('analysing isomaps...')
        for example in self.examples_all:
            img = cv2.imread(example.images[0], cv2.IMREAD_UNCHANGED)
            #blurryness_map = cv2.Laplacian(img, cv2.CV_64F)
            #blurryness_map[np.logical_or(blurryness_map<-700, blurryness_map>700)]=0 #try to filter out the edges
            #example.blurryness = blurryness_map.var()
            example.blurryness = _get_gradient_magnitude(img)

            example.coverage = _calc_isomap_coverage(img)
Project: thesis_scripts    Author: PhilippKopp
def isomap_playground():
    isomaps =[]
    for i in range(len(isomap_paths)):
        isomaps.append(cv2.imread(isomap_paths[i], cv2.IMREAD_UNCHANGED))

    old_isomap_merged = np.zeros([ISOMAP_SIZE, ISOMAP_SIZE, 4], dtype='uint8')

    all_isomaps_merged = merge(isomaps)
    show_isomap('all_isomaps_merged', all_isomaps_merged)
    #cv2.waitKey()
    #cv2.destroyAllWindows()
    #exit()

    for i in range(len(isomaps)):
        new_isomap_merged = merge([old_isomap_merged, isomaps[i]])
        #blurryness = cv2.Laplacian(isomaps[i], cv2.CV_64F).var()
        blurryness_map = cv2.Laplacian(isomaps[i], cv2.CV_64F)
        blurryness_map[np.logical_or(blurryness_map<-700, blurryness_map>700)]=0 #try to filter out the edges
        blurryness = blurryness_map.var()
        #show_isomap('laplac',cv2.Laplacian(isomaps[i], cv2.CV_8U))
        #print ('max', np.max(cv2.Laplacian(isomaps[i], cv2.CV_64F)), 'min', np.min(cv2.Laplacian(isomaps[i], cv2.CV_64F)))
        coverage = calc_isomap_coverage(isomaps[i])
        print(isomap_paths[i]," isomap coverage:",coverage,"blur detection:",blurryness, "overall score", coverage*coverage*blurryness)
        show_isomap('new isomap', isomaps[i])
        show_isomap('merge', new_isomap_merged)
        cv2.waitKey()

        old_isomap_merged = new_isomap_merged


    #cv2.imwrite('/user/HS204/m09113/Desktop/merge_test.png', isomap_merged)

    #cv2.waitKey()
    #cv2.destroyAllWindows()
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan
def EdgeDetection(img):
    # img = cv2.medianBlur(img,5)
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    # print(img)
    # cv2.imwrite("Denoise.jpg",img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # convolute with proper kernels
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    # sobel2y = cv2.Sobel(sobely,cv2.CV_64F,0,1,ksize=3)
    # sobelxy = cv2.Sobel(img,cv2.CV_64F,1,1,ksize=5)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # print(canny)
    # cv2.imwrite('laplacian.jpg',laplacian)
    # cv2.imwrite('sobelx.jpg',sobelx)
    # cv2.imwrite('sobely.jpg',sobely)
    # cv2.imwrite('sobelxy.jpg',sobelxy)
    # cv2.imwrite('canny.jpg',canny)

    # plt.subplot(3,2,1),plt.imshow(img,cmap = 'gray')
    # plt.title('Original'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,2),plt.imshow(laplacian,cmap = 'gray')
    # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,3),plt.imshow(sobelx,cmap = 'gray')
    # plt.title('Sobel X'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,4),plt.imshow(sobely,cmap = 'gray')
    # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,4),plt.imshow(sobelxy,cmap = 'gray')
    # plt.title('Sobel XY'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,5),plt.imshow(canny,cmap = 'gray')
    # plt.title('Canny'), plt.xticks([]), plt.yticks([])

    # plt.show()
    # return {"denoise":img}
    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}
Project: cnn-traffic-light-evaluation    Author: takeitallsource
def contrast_normalization(image):
    blurred = cv2.GaussianBlur(image, (3,3), 0)
    # ksize must be passed as a keyword: the third positional argument of
    # cv2.Laplacian is the optional dst array, not the aperture size.
    return cv2.Laplacian(blurred, cv2.CV_8U, ksize=3)
Project: pynephoscope    Author: neXyon
def detect(self, image):
        floatimage = cv2.cvtColor(np.float32(image), cv2.COLOR_BGR2GRAY) / 255

        if self.gaussian is None or self.gaussian.shape[0] != Configuration.log_kernel_size:
            self.gaussian = cv2.getGaussianKernel(Configuration.log_kernel_size, -1, cv2.CV_32F)

        gaussian_filtered = cv2.sepFilter2D(floatimage, cv2.CV_32F, self.gaussian, self.gaussian)

        # LoG
        filtered = cv2.Laplacian(gaussian_filtered, cv2.CV_32F, ksize=Configuration.log_block_size)

        # DoG
        #gaussian2 = cv2.getGaussianKernel(Configuration.log_block_size, -1, cv2.CV_32F)
        #gaussian_filtered2 = cv2.sepFilter2D(floatimage, cv2.CV_32F, gaussian2, gaussian2)
        #filtered = gaussian_filtered - gaussian_filtered2

        mi = np.min(filtered)
        ma = np.max(filtered)

        if mi - ma != 0:
            filtered = 1 - (filtered - mi) / (ma - mi)

        _, thresholded = cv2.threshold(filtered, Configuration.log_threshold, 1.0, cv2.THRESH_BINARY)
        self.debug = thresholded
        thresholded = np.uint8(thresholded)

        contours = None

        if int(cv2.__version__.split('.')[0]) == 2:
            contours, _ = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        candidates = []

        for i in range(len(contours)):
            rect = cv2.boundingRect(contours[i])
            v1 = rect[0:2]
            v2 = np.add(rect[0:2], rect[2:4])
            if rect[2] < Configuration.log_max_rect_size and rect[3] < Configuration.log_max_rect_size:
                roi = floatimage[v1[1]:v2[1], v1[0]:v2[0]]
                _, _, _, maxLoc = cv2.minMaxLoc(roi)
                maxLoc = np.add(maxLoc, v1)

                candidates.append(maxLoc)

        self.candidates = candidates

        return candidates