Python cv2 module: boxFilter() example source code

The following 11 code examples, extracted from open-source Python projects, illustrate how to use cv2.boxFilter().
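cv2.boxFilter(src, ddepth, ksize) smooths an image by averaging (or, with normalize=False, summing) the pixels in a ksize window around each position. A minimal usage sketch follows; the file name is an illustrative assumption, not taken from any of the projects below.

import cv2

# Illustrative only: 'input.png' is an assumed file name.
img = cv2.imread('input.png')
mean_5x5 = cv2.boxFilter(img, -1, (5, 5))                           # normalized (default): 5x5 mean filter, same depth as the source
sum_5x5 = cv2.boxFilter(img, cv2.CV_32F, (5, 5), normalize=False)   # unnormalized: per-window pixel sums in float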

Project: DeHaze    Author: XierHacker    | project source | file source
def Guidedfilter(im, p, r, eps):
    # Box-filter means of the guide image, the input, and their product over an r x r window
    mean_I = cv2.boxFilter(im, cv2.CV_64F, (r, r))
    mean_p = cv2.boxFilter(p, cv2.CV_64F, (r, r))
    mean_Ip = cv2.boxFilter(im * p, cv2.CV_64F, (r, r))
    cov_Ip = mean_Ip - mean_I * mean_p

    mean_II = cv2.boxFilter(im * im, cv2.CV_64F, (r, r))
    var_I = mean_II - mean_I * mean_I

    # Per-pixel linear coefficients of the guided filter
    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I

    # Average the coefficients over each window, then apply them to the guide image
    mean_a = cv2.boxFilter(a, cv2.CV_64F, (r, r))
    mean_b = cv2.boxFilter(b, cv2.CV_64F, (r, r))

    q = mean_a * im + mean_b
    return q
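A minimal usage sketch for the function above (not part of the project; the file name, radius, and eps below are illustrative assumptions):

import cv2
import numpy as np

# Illustrative only: the file name and parameters are assumptions.
gray = cv2.imread('hazy.png', cv2.IMREAD_GRAYSCALE).astype(np.float64) / 255.0
smoothed = Guidedfilter(gray, gray, r=60, eps=1e-4)   # edge-preserving smoothing, using the image as its own guide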
Project: DeHaze    Author: XierHacker    | project source | file source
def Guidedfilter(im, p, r, eps):
    mean_I = cv2.boxFilter(im, cv2.CV_64F, (r, r))
    mean_p = cv2.boxFilter(p, cv2.CV_64F, (r, r))
    mean_Ip = cv2.boxFilter(im * p, cv2.CV_64F, (r, r))
    cov_Ip = mean_Ip - mean_I * mean_p

    mean_II = cv2.boxFilter(im * im, cv2.CV_64F, (r, r))
    var_I = mean_II - mean_I * mean_I

    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I

    mean_a = cv2.boxFilter(a, cv2.CV_64F, (r, r))
    mean_b = cv2.boxFilter(b, cv2.CV_64F, (r, r))

    q = mean_a * im + mean_b
    return q
Project: pybot    Author: spillai    | project source | file source
def box_blur(im, size=3):
    # ddepth=-1 keeps the source depth; with the default normalize=True this is a size x size mean filter
    return cv2.boxFilter(im, -1, (size, size))
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def foreground(img, blockSize=31):
    """Estimate the foreground region of a fingerprint image.
    return: binary foreground mask
    """
    img = 100 * (img - np.mean(img))
    img[np.where(img > 255)] = 255
    img = cv2.boxFilter(img, -1, (blockSize, blockSize))
    img[np.where(img > 150)] = 255
    img[np.where(img <= 150)] = 0
    # blockSize // 2 keeps the kernel size integral, as cv2.boxFilter requires
    img = cv2.boxFilter(img, -1, (blockSize // 2, blockSize // 2))
    img[np.where(img > 0)] = 255
    return img
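A minimal usage sketch for foreground() above (the file name is an illustrative assumption):

import cv2
import numpy as np

# Illustrative only: 'fingerprint.png' is an assumed file name.
img = cv2.imread('fingerprint.png', cv2.IMREAD_GRAYSCALE).astype(np.float64)
mask = foreground(img, blockSize=31)   # 255 on the detected foreground, 0 elsewhere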
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def minutiaeExtract(img,imgfore):   
    """minutiae extraction: ending and bifurcation    
    img: thinned image
    imgfore: foreground image
    return: minutiae, directions
    """
    image=img.copy()
    P1=image[1:-1,1:-1]
    valid=np.where(P1==1)
    #P1:center; P2-P9:neighbors
    P1, P2, P3, P4, P5 = (P1[valid], image[2:, 1:-1][valid], image[2:, 2:][valid],
                          image[1:-1, 2:][valid], image[:-2, 2:][valid])
    P6, P7, P8, P9 = (image[:-2, 1:-1][valid], image[:-2, :-2][valid],
                      image[1:-1, :-2][valid], image[2:, :-2][valid])
    CN=pre.transitions_vec(P2,P3,P4,P5,P6,P7,P8,P9)
    ending_index=np.where(CN==1)
    bifur_index=np.where(CN==3)
    ending=np.asarray((valid[0][ending_index]+1,valid[1][ending_index]+1))
    bifur=np.asarray((valid[0][bifur_index]+1,valid[1][bifur_index]+1))
    #delete minutiae near the edge of the foreground
    imgfored=cv2.boxFilter(imgfore,-1,(9,9))
    imgfored[np.where(imgfored>0)]=255
    edge1 = np.where(imgfored[ending[0], ending[1]] == 255)
    edge2 = np.where(imgfored[bifur[0], bifur[1]] == 255)
    ending=np.delete(ending.T,edge1[0],0)
    bifur=np.delete(bifur.T,edge2[0],0)
    #delete minutiae near the edge of the image
    edgeDistance=20
    valid1=(ending[:,0]>=edgeDistance) * (ending[:,0]<=img.shape[0]-edgeDistance)
    valid2=(ending[:,1]>=edgeDistance) * (ending[:,1]<=img.shape[1]-edgeDistance)
    ending=ending[np.where(valid1 * valid2)]
    valid1=(bifur[:,0]>=edgeDistance) * (bifur[:,0]<=img.shape[0]-edgeDistance)
    valid2=(bifur[:,1]>=edgeDistance) * (bifur[:,1]<=img.shape[1]-edgeDistance)
    bifur=bifur[np.where(valid1 * valid2)]              
    #validate minutiae and calculate directions at the same time
    ending,theta1=validateMinutiae(image,ending,1)
    bifur,theta2=validateMinutiae(image,bifur,0)
    return ending,bifur,theta1,theta2
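The crossing-number test above relies on pre.transitions_vec, which is not shown on this page. A hedged sketch of what such a helper presumably computes (the number of 0-to-1 transitions while walking the eight neighbors in a circle, assuming the neighbor arrays contain 0/1 values) is given below; it is an assumption, not the project's actual implementation.

import numpy as np

def transitions_vec_sketch(P2, P3, P4, P5, P6, P7, P8, P9):
    # Assumed behaviour: count 0 -> 1 transitions around the 8-neighborhood of each
    # candidate pixel. A crossing number of 1 marks a ridge ending, 3 a bifurcation.
    ring = np.stack([P2, P3, P4, P5, P6, P7, P8, P9, P2])
    return ((ring[:-1] == 0) & (ring[1:] == 1)).sum(axis=0)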
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def foreground(img, blockSize=31):
    img = 100 * (img - np.mean(img))
    img[np.where(img > 255)] = 255
    img = cv2.boxFilter(img, -1, (blockSize, blockSize))
    img[np.where(img > 150)] = 255
    img[np.where(img <= 150)] = 0
    img = cv2.boxFilter(img, -1, (blockSize // 2, blockSize // 2))
    img[np.where(img > 0)] = 255
    return img
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def foreground(img, blockSize=31):
    img = 100 * (img - np.mean(img))
    img[np.where(img > 255)] = 255
    img = cv2.boxFilter(img, -1, (blockSize, blockSize))
    img[np.where(img > 150)] = 255
    img[np.where(img <= 150)] = 0
    img = cv2.boxFilter(img, -1, (blockSize // 2, blockSize // 2))
    img[np.where(img > 0)] = 255
    return img
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def foreground(img, blockSize=31):
    """Estimate the foreground region of a fingerprint image.
    return: binary foreground mask
    """
    img = 100 * (img - np.mean(img))
    img[np.where(img > 255)] = 255
    img = cv2.boxFilter(img, -1, (blockSize, blockSize))
    img[np.where(img > 150)] = 255
    img[np.where(img <= 150)] = 0
    img = cv2.boxFilter(img, -1, (blockSize // 2, blockSize // 2))
    img[np.where(img > 0)] = 255
    return img
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def foreground(img, blockSize=31):
    img = 100 * (img - np.mean(img))
    img[np.where(img > 255)] = 255
    img = cv2.boxFilter(img, -1, (blockSize, blockSize))
    img[np.where(img > 150)] = 255
    img[np.where(img <= 150)] = 0
    img = cv2.boxFilter(img, -1, (blockSize // 2, blockSize // 2))
    img[np.where(img > 0)] = 255
    return img
Project: vision-code    Author: FIRST-Team-1699    | project source | file source
def run(self):
    bytes = b''
    while not self.thread_cancelled:
      try:
        bytes += self.stream.raw.read(1024)
        a = bytes.find(b'\xff\xd8')   # JPEG start-of-image marker
        b = bytes.find(b'\xff\xd9')   # JPEG end-of-image marker
        if a != -1 and b != -1:
          jpg = bytes[a:b + 2]
          bytes = bytes[b + 2:]
          img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)

          # Convert BGR to HSV
          hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
          # define range of blue color in HSV
          #lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
          #upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)

          # Threshold the HSV image to get only blue colors
          mask = cv2.inRange(hsv, np.array([53,187,37]), np.array([97,244,153]))

          # Bitwise-AND mask and original image
          res = cv2.bitwise_and(img,img, mask= mask)
####        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
          # Unnormalized 7x7 box filter; passing mask as dst overwrites the mask in place
          blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT)
          thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
          cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
          cnts = cnts[0] if imutils.is_cv2() else cnts[1]
          cv2.filterSpeckles(mask, 0, 100, 25)
##          cv2.filterSpeckles(mask, 0, 50, 25)
##          cv2.filterSpeckles(mask, 0, 100, 100)
          for c in cnts:
              M = cv2.moments(c)
              if int(M["m00"]) != 0:
                  cX = int(M["m10"] / M["m00"])
                  cY = int(M["m01"] / M["m00"])
              else:
                  (cX, cY) = (0, 0)
              print(cX, cY)
              cv2.drawContours(res, [c], -1, (0, 255, 0), 2)
              cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1)

             # table.putNumber("center X", cX)
          cv2.imshow('img',img)
          cv2.imshow('mask',mask)
          cv2.imshow('Final',res)
          cv2.imshow('cam',img)
          #sd.putNumber('Center X', cX) ##send the x value of the center
          #sd.putNumber('Center Y', cY) ##send the y value of the center
##          print(sd.getNumber('Center Y'), sd.getNumber('Center X'))
          if cv2.waitKey(1) ==27:
            exit(0)
      except ThreadError:
        self.thread_cancelled = True
Project: vision-code    Author: FIRST-Team-1699    | project source | file source
def run(self):
    bytes = b''
    while not self.thread_cancelled:  ####see lines 18, 80, 88 ....
      try:
        bytes += self.stream.raw.read(1024) ##limit max bytes read in one iteration? need to read more on this
        a = bytes.find(b'\xff\xd8')  ##find the start-of-image marker in the data stream
        b = bytes.find(b'\xff\xd9')  ##find the end-of-image marker
        if a != -1 and b != -1:  ##as long as we have one complete frame, do the following
          jpg = bytes[a:b + 2]  ##extract the JPEG bytes for this frame
          bytes = bytes[b + 2:]
          img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)  ##decode the data

          # Convert BGR to HSV
          hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  ##convert the color format for easier processing / math
          # define range of blue color in HSV
          #lower_blue = np.array([self.L_RED, self.L_GREEN, self.L_BLUE], np.uint8)
          #upper_blue = np.array([self.U_RED, self.U_GREEN, self.L_BLUE], np.uint8)

          # Threshold the HSV image to get only blue colors
          mask = cv2.inRange(hsv, np.array([53,187,37]), np.array([97,244,153]))  ##get colors in the range of these HSV values

          # Bitwise-AND mask and original image
          res = cv2.bitwise_and(img,img, mask= mask)

          blurred = cv2.boxFilter(mask, 0, (7, 7), mask, (-1, -1), False, cv2.BORDER_DEFAULT) ##the next few lines create outlines and
          thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]                      ##remove any noise
          cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  #find contours
          cnts = cnts[0] if imutils.is_cv2() else cnts[1]
          cv2.filterSpeckles(mask, 0, 100, 25)      ##remove speckles aka random dots and white noise
          for c in cnts:
              M = cv2.moments(c)
              if int(M["m00"]) != 0:  ##Checks for division by zero
                  cX = int(M["m10"] / M["m00"])
                  cY = int(M["m01"] / M["m00"])
              else:
                  (cX, cY) = (0, 0)
              cv2.drawContours(res, [c], -1, (0, 255, 0), 2)  ##draw box/highlighting 
              cv2.circle(res, (cX, cY), 7, (255, 255, 255), 1)  ##draw box/highlighting 

              ##Try-Catch for appending cX to table
              try:
                self.table.putNumber('centerX', cX)  ##Adds cX to the networktables
              except KeyError:
                print("centerX failed.")

          cv2.imshow('img',img)   ##display original image
          cv2.imshow('mask',mask)  ##display masked image
          cv2.imshow('Final',res)  ##show final image


          cv2.imshow('cam',img)  ##see line 71/comments
          if cv2.waitKey(1) ==27:  ##now we close if esc key is pressed
            exit(0)
      except ThreadError:
        self.thread_cancelled = True