Python cv2 module: Sobel() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.Sobel().

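The snippets below are reproduced as extracted, so they rely on each project's own imports and helper functions; at a minimum they assume import cv2 and import numpy as np. As a quick orientation, here is a minimal, hypothetical sketch of the basic call pattern (the file name is only a placeholder):

import cv2
import numpy as np

# Read an image as grayscale (placeholder path)
img = cv2.imread("example.png", cv2.IMREAD_GRAYSCALE)

# First derivatives in x and y; a signed/float depth keeps negative gradients
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)

# Gradient magnitude, rescaled to 8 bits for display
mag = np.sqrt(sobelx**2 + sobely**2)
mag_8u = np.uint8(255 * mag / (mag.max() + 1e-6))
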
Project: logo-detect    Author: sunbinbin1991    | Project source | File source
def hog(img):
  h, w = img.shape

  gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
  gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)

  mag, ang = cv2.cartToPolar(gx, gy)
  bins = np.int32(bin_n*ang/(2*np.pi))    # quantize angles into bin_n bins over (0, 2*pi)

  # bin_n, wc and hc are expected to be defined at module level (number of bins, cells per row/column)
  bin_cells = ()
  mag_cells = ()
  for i in range(wc):
    for j in range(hc):
      bin_cells += (bins[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)  # integer division for valid slice indices
      mag_cells += (mag[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)

  # np.bincount() counts occurrences of each bin value, weighted by the gradient magnitude
  hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
  hist = np.hstack(hists)     # hist is a bin_n*wc*hc vector

  return hist
Project: spfeas    Author: jgrss    | Project source | File source
def get_mag_ang(img):

    """
    Gets image gradient (magnitude) and orientation (angle)

    Args:
        img

    Returns:
        Gradient, orientation
    """

    img = np.sqrt(img)

    gx = cv2.Sobel(np.float32(img), cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(np.float32(img), cv2.CV_32F, 0, 1)

    mag, ang = cv2.cartToPolar(gx, gy)

    return mag, ang, gx, gy
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
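A side note on the cv2.findContours call above: the three-value unpacking matches OpenCV 3.x, while OpenCV 2.x and 4.x return only (contours, hierarchy). A small version-tolerant sketch (binary_img stands in for any single-channel binary image):

# Take the last two returned items so the code works on OpenCV 2.x, 3.x and 4.x
res = cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = res[-2], res[-1]
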
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray,5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise,None,3,7,21)
    _,img_thre = cv2.threshold(img_denoise,100,255,cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre,cv2.CV_64F,0,1,ksize=3)
    return sobely
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def hls_select(image, thresh=(0, 255)):
    # 1) Convert to HLS color space
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    # 2) Apply a threshold to the S channel
    thresh = (90, 255)  # note: this hard-coded range overrides the thresh argument passed in
    binary = np.zeros_like(S)
    binary[(S > thresh[0]) & (S <= thresh[1])] = 1
    # 3) Return a binary image of threshold result
    return binary


# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):

    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output


# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Calculate the magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output


# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):

    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    if orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient magnitude
            # is > thresh_min and < thresh_max
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
Project: logo-detect    Author: sunbinbin1991    | Project source | File source
def hog(img):
  h, w = img.shape

  gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
  gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)

  mag, ang = cv2.cartToPolar(gx, gy)
  bins = np.int32(bin_n*ang/(2*np.pi))    # quantize angles into bin_n bins over (0, 2*pi)

  # bin_n, wc and hc are expected to be defined at module level
  bin_cells = ()
  mag_cells = ()
  for i in range(wc):
    for j in range(hc):
      bin_cells += (bins[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)
      mag_cells += (mag[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)

  hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
  hist = np.hstack(hists)     # hist is a bin_n*wc*hc vector

  return hist
Project: logo-detect    Author: sunbinbin1991    | Project source | File source
def hog(img):
  h, w = img.shape

  gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
  gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)

  mag, ang = cv2.cartToPolar(gx, gy)
  bins = np.int32(bin_n*ang/(2*np.pi))    # quantizing binvalues in (0...bin_n)


  bin_cells = ()
  mag_cells = ()
  for i in range(wc):
    for j in range(hc):
      bin_cells += (bins[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)
      mag_cells += (mag[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)


  hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
  hist = np.hstack(hists)     # hist is a bin_n*wc*hc vector

  return hist
Project: logo-detect    Author: sunbinbin1991    | Project source | File source
def hog(img):
  h, w = img.shape

  gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
  gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)

  mag, ang = cv2.cartToPolar(gx, gy)
  bins = np.int32(bin_n*ang/(2*np.pi))    # quantize angles into bin_n bins over (0, 2*pi)

  # bin_n, wc and hc are expected to be defined at module level
  bin_cells = ()
  mag_cells = ()
  for i in range(wc):
    for j in range(hc):
      bin_cells += (bins[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)
      mag_cells += (mag[j*h//hc:(j+1)*h//hc, i*w//wc:(i+1)*w//wc],)

  hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
  hist = np.hstack(hists)     # hist is a bin_n*wc*hc vector

  return hist
Project: dataArtist    Author: radjkarl    | Project source | File source
def __init__(self, imageDisplay):
        Tool.__init__(self, imageDisplay)
        pa = self.setParameterMenu()
        self.createResultInDisplayParam(pa)

        self.pConvMethod = pa.addChild({
            'name': 'Method',
            'type': 'list',
            'value': 'Edge gradient',
            'limits': ['Edge gradient', 'Sobel-H',
                       'Sobel-V', 'Laplace']})

        self.pKsize = pa.addChild({
            'name': 'kernel size',
            'type': 'int',
            'value': 3,
            'limits': [3,15]})
Project: dataArtist    Author: radjkarl    | Project source | File source
def _filter(img, method, k):
        if method == 'Edge gradient':
            sy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=k)
            sx = cv2.Sobel(img, ddepth=cv2.CV_64F,dx=1, dy=0, ksize=k)
#             sx = sobel(img, axis=0, mode='constant')
#             sy = sobel(img, axis=1, mode='constant')
            return np.hypot(sx, sy)
        if method == 'Sobel-H':
            return cv2.Sobel(img, ddepth=cv2.CV_64F,dx=0, dy=1, ksize=k)
        #sobel(img, axis=0, mode='constant')
        if method == 'Sobel-V':
            return cv2.Sobel(img, ddepth=cv2.CV_64F,dx=1, dy=0, ksize=k)
        #sobel(img, axis=1, mode='constant')
        if method == 'Laplace':
            return cv2.Laplacian(img, ddepth=cv2.CV_64F,ksize=5)
        #laplace(img)
Project: Vision-based-parking-lot-availability-OpenCV    Author: Saar1312    | Project source | File source
def getEdges(gray,detector,min_thr=None,max_thr=None):
    """
        Where detector in {1,2,3,4,5}
        1: Laplacian
        2: Sobelx
        3: Sobely
        4: Canny
        5: Sobelx with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray,cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=-1)
    elif detector == 4:
        return cv2.Canny(gray,min_thr,max_thr)  # Canny(min_thresh, max_thresh): the thresholds apply to the intensity
                                                # gradient (how different a pixel is from its neighbors), not to the raw intensity
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
Project: lane-detection-raspberry-pi    Author: uvbakutan    | Project source | File source
def binary_extraction(self,image, ksize=3):
        # undistort first
        #image = self.undistort(image)

        color_bin = self.color_thresh(image,thresh=(90, 150))              # initial values 110, 255

        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)  # ksize must be passed by keyword (the 5th positional arg is dst)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)

        gradx = self.abs_sobel_thresh(sobelx, thresh=(100, 190))             # initial values 40, 160
        grady = self.abs_sobel_thresh(sobely, thresh=(100, 190))             # initial values 40, 160
        mag_binary = self.mag_thresh(sobelx, sobely, mag_thresh=(100, 190))  # initial values 40, 160
        #dir_binary = self.dir_threshold(sobelx, sobely, thresh=(0.7, 1.3))

        combined = np.zeros_like(gradx)
        #combined[(((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))) | (color_bin==1) ] = 1
        combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) | (color_bin==1) ] = 1
        #combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) ] = 1

        return combined

    # transform perspective
Project: dust_repos    Author: taozhijiang    | Project source | File source
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise before edge extraction
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Sobel edge detection (x gradient)
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel edges", sobel_8u)

    # Otsu binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("Otsu threshold result", bgimg)

    return bgimg
Project: dust_repos    Author: taozhijiang    | Project source | File source
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise before edge extraction
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Sobel edge detection (x gradient)
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel edges", sobel_8u)

    # Otsu binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("Otsu threshold result", bgimg)

    return bgimg
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project source | File source
def process(img):
    img=cv2.medianBlur(img,5)
    kernel=np.ones((3,3),np.uint8)

    #img=cv2.erode(img,kernel,iterations = 1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations = 1)
    erosion = cv2.erode(dilation, element1, iterations = 1)
    dilation2 = cv2.dilate(erosion, element2,iterations = 3)
    #img=cv2.dilate(img,kernel,iterations = 1)
    #img=cv2.Canny(img,100,200)
    return dilation2
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project source | File source
def logoDetect(img,imgo):
    '''Detect the vehicle logo in the candidate region and crop it from the original image.'''
    imglogo=imgo.copy()
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
    #img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
    img=cv2.Canny(img,100,200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2,iterations = 1)
    img = cv2.erode(img, element1, iterations = 3)
    img = cv2.dilate(img, element2,iterations = 3)

    # Find contours
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema=0
    result=[]
    for con in contours:
        x,y,w,h=cv2.boundingRect(con)
        area=w*h
        ratio=max(w/h,h/w)
        if area>300 and area<20000 and ratio<2:
            if area>tema:
                tema=area
                result=[x,y,w,h]
                ratio2=ratio
    # Map the detected box back to the original image coordinates (the region was upscaled 2x), using the plate position from the earlier detection step
    logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
    logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
    cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
    cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
    print(tema, ratio2, result)
    logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg',logo2)

    return img
Project: SDcarsLaneDetection    Author: Nazanin1369    | Project source | File source
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)  # np.float was removed from recent NumPy
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
Project: SDcarsLaneDetection    Author: Nazanin1369    | Project source | File source
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1

    # Return the result
    return binary_output
Project: HandGesturePy    Author: arijitx    | Project source | File source
def preprocess_hog(digits):
    samples = []
    for img in digits:
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        bin_cells = bin[:100,:100], bin[100:,:100], bin[:100,100:], bin[100:,100:]
        mag_cells = mag[:100,:100], mag[100:,:100], mag[:100,100:], mag[100:,100:]
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)


#Here goes my wrappers:
Project: HandGesturePy    Author: arijitx    | Project source | File source
def hog_single(img):
    samples=[]
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    bin_n = 16
    bin = np.int32(bin_n*ang/(2*np.pi))
    bin_cells = bin[:100,:100], bin[100:,:100], bin[:100,100:], bin[100:,100:]
    mag_cells = mag[:100,:100], mag[100:,:100], mag[:100,100:], mag[100:,100:]
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)

    # transform to Hellinger kernel
    eps = 1e-7
    hist /= hist.sum() + eps
    hist = np.sqrt(hist)
    hist /= norm(hist) + eps

    samples.append(hist)
    return np.float32(samples)

# using Compute_hog takes too much time!
Project: answer-sheet-scan    Author: inuyasha2012    | Project source | File source
def get_init_process_img(roi_img):
    """
    Preprocess the ROI: combine horizontal and vertical Sobel gradients, binarize, clean up with erosion/dilation, then run Canny edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, ksize=-1)  # ksize must be a keyword argument; -1 selects the 3x3 Scharr kernel
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, ksize=-1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    graysrc = cv2.cvtColor(colorsrc, cv2.COLOR_BGR2GRAY)  # cv2.cv.CV_BGR2GRAY only exists in OpenCV 2.x
    graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

    ## gradient X ##
    gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
    gradx = cv2.convertScaleAbs(gradx)

    ## gradient Y ##
    grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
    grady = cv2.convertScaleAbs(grady)

    grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

    return grad
Project: pybot    Author: spillai    | Project source | File source
def blur_measure(im): 
    """ See cv::videostab::calcBlurriness """

    H, W = im.shape[:2]
    gx = cv2.Sobel(im, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(im, cv2.CV_32F, 0, 1)
    norm_gx, norm_gy = cv2.norm(gx), cv2.norm(gy)
    return 1.0 / ((norm_gx ** 2 + norm_gy ** 2) / (H * W + 1e-6))
Project: pybot    Author: spillai    | Project source | File source
def sobel(im, dx=1, dy=1, blur=3): 
    if blur is None or blur == 0: 
        blur_im = im
    else: 
        blur_im = cv2.GaussianBlur(im, (blur,blur), 0)
    return cv2.Sobel(blur_im, cv2.CV_8U, dx, dy)
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray,5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise,None,3,7,21)
    _,img_thre = cv2.threshold(img_denoise,100,255,cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre,cv2.CV_64F,0,1,ksize=3)
    return sobely
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray,5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise,None,3,7,21)
    _,img_thre = cv2.threshold(img_denoise,100,255,cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre,cv2.CV_64F,0,1,ksize=3)
    return sobely
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray,5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise,None,3,7,21)
    _,img_thre = cv2.threshold(img_denoise,100,255,cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre,cv2.CV_64F,0,1,ksize=3)
    return sobely
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray,5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise,None,3,7,21)
    _,img_thre = cv2.threshold(img_denoise,100,255,cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre,cv2.CV_64F,0,1,ksize=3)
    return sobely
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)  # np.float was removed from recent NumPy
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary


# Define a function that thresholds the S-channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
Project: KCF    Author: Bruceeeee    | Project source | File source
def hog(img, bin_n=8, cell_size=4):
    img = cv2.resize(img,(128,128))
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    bin = np.int32(bin_n*ang/(2*np.pi))

    bin_cells = []
    mag_cells = []

    cellx = celly = cell_size

    for i in range(0, img.shape[0] // celly):   # integer division so range() receives an int
        for j in range(0, img.shape[1] // cellx):
            bin_cells.append(bin[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])
            mag_cells.append(mag[i*celly : i*celly+celly, j*cellx : j*cellx+cellx])   

    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)

    # transform to Hellinger kernel
    eps = 1e-7
    hist /= hist.sum() + eps
    hist = np.sqrt(hist)
    hist /= norm(hist) + eps
    hist_out = np.reshape(hist,(32,32,8))


    return hist_out
Project: bib-tagger    Author: KateRita    | Project source | File source
def _create_derivative(cls, img):
        edges = cv2.Canny(img, 175, 320, apertureSize=3)
        # Create gradient map using Sobel
        sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=-1)
        sobely64f = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=-1)

        theta = np.arctan2(sobely64f, sobelx64f)
        if diagnostics:
            cv2.imwrite('edges.jpg',edges)
            cv2.imwrite('sobelx64f.jpg', np.absolute(sobelx64f))
            cv2.imwrite('sobely64f.jpg', np.absolute(sobely64f))
            # amplify theta for visual inspection
            theta_visible = (theta + np.pi)*255/(2*np.pi)
            cv2.imwrite('theta.jpg', theta_visible)
        return (edges, sobelx64f, sobely64f, theta)
Project: pycolor_detection    Author: parth1993    | Project source | File source
def edgedetect(channel):
    sobelx = cv2.Sobel(channel, cv2.CV_16S, 1, 0, ksize=3)
    sobely = cv2.Sobel(channel, cv2.CV_16S, 0, 1, ksize=3)
    sobel = np.hypot(sobelx, sobely)
    sobel[sobel > 255] = 255

    return sobel
Project: cvcalib    Author: Algomorph    | Project source | File source
def __filter_candidate(greyscale_image, coord, neighborhood_size):
    window = greyscale_image[coord[0] - neighborhood_size:coord[0] + neighborhood_size + 1,
             coord[1] - neighborhood_size:coord[1] + neighborhood_size + 1]
    grad_x = cv2.Sobel(window, cv2.CV_32FC1, dx=1, dy=0, ksize=3)
    grad_y = cv2.Sobel(window, cv2.CV_32FC1, dx=0, dy=1, ksize=3)
    grad_mag = np.abs(grad_x) + np.abs(grad_y)
    grad_mag_flat = grad_mag.flatten()
    orientations_flat = (cv2.phase(grad_x, grad_y) % pi).flatten()  # phase accuracy: about 0.3 degrees
    hist = (np.histogram(orientations_flat, bins=64, range=(0, pi), weights=grad_mag_flat)[0] /
            (neighborhood_size * neighborhood_size))

    return hist, grad_mag
Project: imgProcessor    Author: radjkarl    | Project source | File source
def tenengrad(img, ksize=3):
    ''''TENG' algorithm (Krotkov86)'''
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx*Gx + Gy*Gy
    mn = cv2.mean(FM)[0]
    if np.isnan(mn):
        return np.nanmean(FM)
    return mn
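As a hypothetical usage sketch (the file name is a placeholder), the Tenengrad score of a blurred copy should come out lower than that of the original image:

img = cv2.imread("example.png", cv2.IMREAD_GRAYSCALE)
blurred = cv2.GaussianBlur(img, (9, 9), 0)
print(tenengrad(img), tenengrad(blurred))  # the blurred copy should yield the smaller value
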
Project: Brain_Tumor_Segmentation    Author: KarthikRevanuru    | Project source | File source
def seg(path):
    p=np.array([get_3d_data('../../../Cut_Brats_Training_Data/Test/'+"cut"+path+"_flair.nii.gz")])

    shap=p[0].shape
    print (shap)

    leng=shap[0]*shap[1]*shap[2]

    #pix=get_pixels(path)
    pc=concat(p)

    print (p[0].shape)
    px = cv2.Sobel(p[0],cv2.CV_64F,1,0,ksize=5)
    py = cv2.Sobel(p[0],cv2.CV_64F,0,1,ksize=5)

    print(time.strftime('%a %H:%M:%S'))
    pcx=concat1(px)
    pcy=concat1(py)


    print(time.strftime('%a %H:%M:%S'))
    pa=ndimage.filters.convolve(p[0],np.full((5, 5, 5), 1.0/125),mode='constant')

    print(time.strftime('%a %H:%M:%S'))
    pg=concat1(pa)

    print(time.strftime('%a %H:%M:%S'))
    X=reshape_feat(pc,pg,pcx,pcy,leng)
    print(time.strftime('%a %H:%M:%S'))

    return X
Project: Brain_Tumor_Segmentation    Author: KarthikRevanuru    | Project source | File source
def seg(path):
    p=np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/'+"cut"+path+"_flair.nii.gz")])

    y=np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/'+"cut"+path[4:]+"_seg.nii.gz")])


    shap=p[0].shape
    print (shap)

    leng=shap[0]*shap[1]*shap[2]

    #pix=get_pixels(path)
    pc=concat(p)

    yc=concat(y)
    print (p[0].shape)
    px = cv2.Sobel(p[0],cv2.CV_64F,1,0,ksize=5)
    py = cv2.Sobel(p[0],cv2.CV_64F,0,1,ksize=5)

    print(time.strftime('%a %H:%M:%S'))
    pcx=concat1(px)
    pcy=concat1(py)


    print(time.strftime('%a %H:%M:%S'))
    pa=ndimage.filters.convolve(p[0],np.full((5, 5, 5), 1.0/125),mode='constant')

    print(time.strftime('%a %H:%M:%S'))
    pg=concat1(pa)

    print(time.strftime('%a %H:%M:%S'))
    X=reshape_feat(pc,pg,pcx,pcy,leng)
    Y=reshape_seg(yc,leng)
    print(time.strftime('%a %H:%M:%S'))

    return X,Y
Project: recognizeFitExercise    Author: tyiannak    | Project source | File source
def getRGBS(img, PLOT = False):

    image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)

    # grab the image channels, initialize the tuple of colors,
    # the figure and the flattened feature vector   
    features = []
    featuresSobel = []
    Grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.cv.CV_BGR2GRAY only exists in OpenCV 2.x
    histG = cv2.calcHist([Grayscale], [0], None, [16], [0, 256])
    histG = histG / histG.sum()
    features.extend(histG[:,0].tolist())


    grad_x = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 1, 0, ksize = 3, scale = 1, delta = 0, borderType = cv2.BORDER_DEFAULT))
    grad_y = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 0, 1, ksize = 3, scale = 1, delta = 0, borderType = cv2.BORDER_DEFAULT))
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    dst = cv2.addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0)
    histSobel = cv2.calcHist([dst], [0], None, [16], [0, 256])
    histSobel = histSobel / histSobel.sum()
    features.extend(histSobel[:,0].tolist())

    Fnames = []
    Fnames.extend(["Color-Gray"+str(i) for i in range(8)])
    Fnames.extend(["Color-GraySobel"+str(i) for i in range(8)])

    return features, Fnames
Project: diy_driverless_car_ROS    Author: wilselby    | Project source | File source
def HLS_sobel(img, s_thresh=(120, 255), sx_thresh=(20, 255),l_thresh=(40,255)):
    img = np.copy(img)

    # Convert to HLS color space and separate the channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)  # np.float was removed from recent NumPy
    #h_channel = hls[:,:,0]
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x
    # sobelx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255))
    # l_channel_col=np.dstack((l_channel,l_channel, l_channel))
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    # Threshold lightness
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1

    channels = 255*np.dstack(( l_binary, sxbinary, s_binary)).astype('uint8')        
    binary = np.zeros_like(sxbinary)
    binary[((l_binary == 1) & (s_binary == 1) | (sxbinary==1))] = 1
    binary = 255*np.dstack((binary,binary,binary)).astype('uint8')            
    return  binary,channels
Project: car-detection    Author: mmetcalfe    | Project source | File source
def get_gradient(im):
    # Calculate the x and y gradients using Sobel operator
    grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=3)
    grad_y = cv2.Sobel(im,cv2.CV_32F,0,1,ksize=3)

    # Combine the two gradients
    grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
    # print grad.dtype
    # print grad.shape
    return grad

# Based on: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/
Project: Hog-feature    Author: PENGZhaoqing    | Project source | File source
def global_gradient(self):
        gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5)
        gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5)
        gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0)
        gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True)
        return gradient_magnitude, gradient_angle
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def _get_gradient_magnitude(im):
    "Get magnitude of gradient for given image"
    ddepth = cv2.CV_32F
    dx = cv2.Sobel(im, ddepth, 1, 0)
    dy = cv2.Sobel(im, ddepth, 0, 1)
    dxabs = cv2.convertScaleAbs(dx)
    dyabs = cv2.convertScaleAbs(dy)
    mag = cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)

    return np.average(mag)
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project source | File source
def process(img):
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    gau=cv2.GaussianBlur(gray,(5,5),0)
    ret,thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med=cv2.medianBlur(thre,5)
    canny=cv2.Canny(thre,100,200)
    #sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize = 3)
    dilation=cv2.dilate(canny,element2,iterations = 1)
    dst=cv2.erode(dilation, element1, iterations = 1)
    return dst
Project: Camera2TCP    Author: kevinkit    | Project source | File source
def get_gradient(self,im) :
        # Calculate the x and y gradients using Sobel operator
        grad_x = cv2.Sobel(im,cv2.CV_32F,1,0,ksize=3)
        grad_y = cv2.Sobel(im,cv2.CV_32F,0,1,ksize=3)

        # Combine the two gradients
        grad = cv2.addWeighted(np.absolute(grad_x), 0.5, np.absolute(grad_y), 0.5, 0)
        return grad
Project: SummerProject_MacularDegenerationDetection    Author: WDongYuan    | Project source | File source
def EdgeDetection(img):
    # img = cv2.medianBlur(img,5)
    img = cv2.fastNlMeansDenoising(img,None,3,7,21)
    _,img = cv2.threshold(img,30,255,cv2.THRESH_TOZERO)
    denoise_img = img
    # print(img)
    # cv2.imwrite("Denoise.jpg",img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # convolute with proper kernels
    laplacian = cv2.Laplacian(img,cv2.CV_64F)
    sobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)  # x
    sobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=3)  # y
    # sobel2y = cv2.Sobel(sobely,cv2.CV_64F,0,1,ksize=3)
    # sobelxy = cv2.Sobel(img,cv2.CV_64F,1,1,ksize=5)  # y
    canny = cv2.Canny(img,100,200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # print(canny)
    # cv2.imwrite('laplacian.jpg',laplacian)
    # cv2.imwrite('sobelx.jpg',sobelx)
    # cv2.imwrite('sobely.jpg',sobely)
    # cv2.imwrite('sobelxy.jpg',sobelxy)
    # cv2.imwrite('canny.jpg',canny)

    # plt.subplot(3,2,1),plt.imshow(img,cmap = 'gray')
    # plt.title('Original'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,2),plt.imshow(laplacian,cmap = 'gray')
    # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,3),plt.imshow(sobelx,cmap = 'gray')
    # plt.title('Sobel X'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,4),plt.imshow(sobely,cmap = 'gray')
    # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,4),plt.imshow(sobelxy,cmap = 'gray')
    # plt.title('Sobel XY'), plt.xticks([]), plt.yticks([])

    # plt.subplot(3,2,5),plt.imshow(canny,cmap = 'gray')
    # plt.title('Canny'), plt.xticks([]), plt.yticks([])

    # plt.show()
    # return {"denoise":img}
    return {"denoise":denoise_img,"laplacian":laplacian,"canny":canny,"sobely":sobely,"sobelx":sobelx,"contour":contour_image}
Project: Automatic-Plate-Number-Recognition-APNR    Author: kagan94    | Project source | File source
def find_contours(img):
    '''
    :param img: (numpy array)
    :return: all possible rectangles (contours)
    '''
    img_blurred = cv2.GaussianBlur(img, (5, 5), 1)  # remove noise
    img_gray = cv2.cvtColor(img_blurred, cv2.COLOR_BGR2GRAY)  # greyscale image
    # cv2.imshow('', img_gray)
    # cv2.waitKey(0)

    # Apply Sobel filter to find the vertical edges
    # Find vertical lines. Car plates have high density of vertical lines
    img_sobel_x = cv2.Sobel(img_gray, cv2.CV_8UC1, dx=1, dy=0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    # cv2.imshow('img_sobel', img_sobel_x)

    # Apply an optimal threshold using Otsu's algorithm
    retval, img_threshold = cv2.threshold(img_sobel_x, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # cv2.imshow('s', img_threshold)
    # cv2.waitKey(0)

    # TODO: Try to apply AdaptiveThresh
    # Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
    # gaus_threshold = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 115, 1)
    # cv2.imshow('or', img)
    # cv2.imshow('gaus', gaus_threshold)
    # cv2.waitKey(0)

    # Define a rectangular structuring element of size 17x3 (used below for the morphological cleaning)
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))

    # And use this structural element in a close morphological operation
    morph_img_threshold = deepcopy(img_threshold)
    cv2.morphologyEx(src=img_threshold, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
    # cv2.dilate(img_threshold, kernel=np.ones((1,1), np.uint8), dst=img_threshold, iterations=1)
    # cv2.imshow('Normal Threshold', img_threshold)
    # cv2.imshow('Morphological Threshold based on rect. mask', morph_img_threshold)
    # cv2.waitKey(0)

    # Find contours that contain possible plates (in hierarchical relationship)
    contours, hierarchy = cv2.findContours(morph_img_threshold,
                                           mode=cv2.RETR_EXTERNAL,  # retrieve the external contours
                                           method=cv2.CHAIN_APPROX_NONE)  # all pixels of each contour

    plot_intermediate_steps = False
    if plot_intermediate_steps:
        plot(plt, 321, img, "Original image")
        plot(plt, 322, img_blurred, "Blurred image")
        plot(plt, 323, img_gray, "Grayscale image", cmap='gray')
        plot(plt, 324, img_sobel_x, "Sobel")
        plot(plt, 325, img_threshold, "Threshold image")
        # plot(plt, 326, morph_img_threshold, "After Morphological filter")
        plt.tight_layout()
        plt.show()

    return contours
Project: SLIC_cityscapes    Author: wpqmanu    | Project source | File source
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    # grayscale image
    if len(colorsrc.shape)==2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)

        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)

        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)

        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

        return grad

    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc=colorsrc[:,:,index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total=gradx_total+gradx

            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady

            grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)

        return grad