Python cv2 module: GaussianBlur() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.GaussianBlur().
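
Before diving into the project excerpts, here is a minimal, self-contained sketch of the basic call pattern (the file names are placeholders): the kernel size must be positive and odd, or (0, 0) to let OpenCV derive it from sigma.

import cv2

img = cv2.imread('input.jpg')                        # placeholder file name
blur_k = cv2.GaussianBlur(img, (5, 5), 0)            # 5x5 kernel, sigma derived from the kernel size
blur_s = cv2.GaussianBlur(img, (0, 0), sigmaX=2.0)   # kernel size derived from sigmaX
cv2.imwrite('blurred.png', blur_k)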

Project: Speedy-TSLSR    Author: talhaHavadar    | project source | file source
def __bound_contours(roi):
    """
        Returns a modified copy of the ROI (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # keep the five largest contours by area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1);

    return (roi_copy, rects)
Project: PaperHelper    Author: EdgarNg1024    | project source | file source
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Project: idmatch    Author: maddevsio    | project source | file source
def remove_borders(image):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edged', edged)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx) == 4)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is not None and len(screenCnt) > 0:
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig
Project: SudokuSolver    Author: Anve94    | project source | file source
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated
Project: beryl    Author: DanielJDufour    | project source | file source
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        if (1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07):
                            squares.append(cnt)
    return squares
Project: DoNotSnap    Author: AVGInnovationLabs    | project source | file source
def affine_skew(self, tilt, phi, img, mask=None):
        h, w = img.shape[:2]
        if mask is None:
            mask = np.zeros((h, w), np.uint8)
            mask[:] = 255
        A = np.float32([[1, 0, 0], [0, 1, 0]])
        if phi != 0.0:
            phi = np.deg2rad(phi)
            s, c = np.sin(phi), np.cos(phi)
            A = np.float32([[c, -s], [s, c]])
            corners = [[0, 0], [w, 0], [w, h], [0, h]]
            tcorners = np.int32(np.dot(corners, A.T))
            x, y, w, h = cv2.boundingRect(tcorners.reshape(1, -1, 2))
            A = np.hstack([A, [[-x], [-y]]])
            img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        if tilt != 1.0:
            s = 0.8*np.sqrt(tilt * tilt - 1)
            img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
            img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
            A[0] /= tilt
        if phi != 0.0 or tilt != 1.0:
            h, w = img.shape[:2]
            mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
        Ai = cv2.invertAffineTransform(A)
        return img, mask, Ai
Project: yonkoma2data    Author: esuji5    | project source | file source
def homography(self, img, outdir_name=''):
        orig = img
    # binarize: grayscale, blur, then Canny edge detection
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

    # find contours in the binarized (edge) image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
    # sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
        # approximate the largest contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
Project: piwall-cvtools    Author: infinnovation    | project source | file source
def find_squares(img, cos_limit = 0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < cos_limit :
                        squares.append(cnt)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares

###
### Version V2.  Collect meta-data along the way,  with commentary added.
###
Project: bib-tagger    Author: KateRita    | project source | file source
def find_bibs(image):
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  binary = cv2.GaussianBlur(gray,(5,5),0)
  ret,binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
  #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY);

  #lapl = cv2.Laplacian(image,cv2.CV_64F)
  #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY);
  #blurred = cv2.GaussianBlur(lapl,(5,5),0)
  #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #cv2.imwrite("lapl.jpg", lapl)

  edges = cv2.Canny(image,175,200)
  cv2.imwrite("edges.jpg", edges)
  binary = edges

  cv2.imwrite("binary.jpg", binary)
  contours,hierarchy = find_contours(binary)

  return get_rectangles(contours)
Project: AutomatorX    Author: xiaoyaojjian    | project source | file source
def diff_rect(img1, img2, pos=None):
    """find counters include pos in differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest difference area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Project: card-scanner    Author: RFVenter    | project source | file source
def filter_image(image, canny1=10, canny2=10, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_CUBIC)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, canny1, canny2)

    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return edged
Project: card-scanner    Author: RFVenter    | project source | file source
def filter_image(image, canny1=10, canny2=10, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_CUBIC)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, canny1, canny2)

    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return edged
Project: card-scanner    Author: RFVenter    | project source | file source
def filter_image(image, canny1=5, canny2=5, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_NEAREST)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, canny1, canny2)

    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return edged
Project: card-scanner    Author: RFVenter    | project source | file source
def filter_image(image, canny1=10, canny2=10, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_NEAREST)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, canny1, canny2)

    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return edged
Project: SudokuSolver    Author: Anve94    | project source | file source
def apply_blur(self, image):
        """ Adds a blur to the given image, using the kernel size
            defined in settings. """
        if ENABLE_DEBUG:
            print("DEBUG -- Attempting to apply gaussian blur to"
                  " grayscale image.")
        try:
            blurred = cv2.GaussianBlur(src=image,
                                       ksize=BLUR_KERNEL_SIZE,
                                       sigmaX=0)
        except:
            if VERBOSE_EXIT:
                print("ERROR -- Could not apply blur filter. Please check"
                      " settings and consider changing the blur kernel size.")
            exit()
        if ENABLE_PREVIEW_ALL:
            image_preview(blurred)
        if ENABLE_DEBUG:
            print("DEBUG -- Gaussian Blur succesfully applied.")
        return blurred
Project: opencv-gui-helper-tool    Author: maunesh    | project source | file source
def _render(self):
        self._smoothed_img = cv2.GaussianBlur(self.image, (self._filter_size, self._filter_size), sigmaX=0, sigmaY=0)
        self._edge_img = cv2.Canny(self._smoothed_img, self._threshold1, self._threshold2)
        cv2.imshow('smoothed', self._smoothed_img)
        cv2.imshow('edges', self._edge_img)
Project: ab2016-ros-gazebo    Author: akademikbilisim    | project source | file source
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1)
Project: FaceSwapper    Author: QuantumLiu    | project source | file source
def get_face_mask(self,im, landmarks,ksize=(11,11)):
        '''
        Build a face mask from the facial landmarks.
        '''
        mask = np.zeros(im.shape[:2], dtype=np.float64)

        for group in self.OVERLAY_POINTS:
            self.draw_convex_hull(mask,
                             landmarks[group],
                             color=1)

        mask = np.array([mask, mask, mask]).transpose((1, 2, 0))

        mask = (cv2.GaussianBlur(mask, ksize, 0) > 0) * 1.0
        mask = cv2.GaussianBlur(mask, ksize, 0)

        return mask
Project: Speedy-TSLSR    Author: talhaHavadar    | project source | file source
def __filterRedColor(image_hsv):
    """
        Filters the red color from image_hsv and returns mask.
    """
    mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
    mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
    mask = mask1 + mask2
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    mask = cv2.Canny(mask, 50, 100)
    mask = cv2.GaussianBlur(mask, (13, 13), 0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)))
    return mask
Project: esys-pbi    Author: fsxfreak    | project source | file source
def dif_gaus(image, lower, upper):
        lower, upper = int(lower-1), int(upper-1)
        lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0)
        upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0)
        # upper +=50
        # lower +=50
        dif = lower-upper
        # dif *= .1
        # dif = cv2.medianBlur(dif,3)
        # dif = 255-dif
        dif = cv2.inRange(dif, np.asarray(200),np.asarray(256))
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
        dif = cv2.dilate(dif, kernel, iterations=2)
        dif = cv2.erode(dif, kernel, iterations=1)
        # dif = cv2.max(image,dif)
        # dif = cv2.dilate(dif, kernel, iterations=1)
        return dif
Project: ConditionalGAN    Author: seungjooli    | project source | file source
def detect_edges(images):
        def blur(image):
            return cv2.GaussianBlur(image, (5, 5), 0)

        def canny_otsu(image):
            scale_factor = 255
            scaled_image = np.uint8(image * scale_factor)

            otsu_threshold = cv2.threshold(
                cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
            lower_threshold = max(0, int(otsu_threshold * 0.5))
            upper_threshold = min(255, int(otsu_threshold))
            edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
            edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)

            return np.float32(edges) * (1 / scale_factor)

        blurred = [blur(image) for image in images]
        canny_applied = [canny_otsu(image) for image in blurred]

        return canny_applied
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | project source | file source
def process_img(img):
    original_image=img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    copy=processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    #                       edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
    #draw_lines(processed_img,lines)
    #draw_lines(original_image,lines)

    #Platform lines
    #imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
    ret,thresh = cv2.threshold(platform,127,255,0)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
    try:
        platformpos=contours[0][0][0]
    except:
        platformpos=[[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)

    ballpos=draw_circles(original_image,circles=circles)

    return processed_img,original_image,platform,platformpos,ballpos
Project: bib-tagger    Author: KateRita    | project source | file source
def find_bib(image):
  width, height, depth = image.shape

  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  #gray = cv2.equalizeHist(gray)
  blurred = cv2.GaussianBlur(gray,(5,5),0)

  debug_output("find_bib_blurred", blurred)
  #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0);
  ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY);
  debug_output("find_bib_binary", binary)
  threshold_contours,hierarchy = find_contours(binary)

  debug_output("find_bib_threshold", binary)

  edges = cv2.Canny(gray,175,200, 3)
  edge_contours,hierarchy = find_contours(edges)

  debug_output("find_bib_edges", edges)

  contours = threshold_contours + edge_contours
  debug_output_contours("find_bib_threshold_contours", image, contours)

  rectangles = get_rectangles(contours)

  debug_output_contours("find_bib_rectangles", image, rectangles)

  potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width*height)]

  debug_output_contours("find_bib_potential_bibs", image, potential_bibs)

  ideal_aspect_ratio = 1.0
  potential_bibs = sorted(potential_bibs, key = lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))

  return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0,0)],[(0,0)],[(0,0)],[(0,0)]])

#
# Checks that the size and aspect ratio of the contour are appropriate for a bib.
#
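
The is_potential_bib helper referenced by the comment above is not included in this excerpt; a minimal sketch of such a size/aspect-ratio filter, with purely illustrative thresholds rather than the project's actual values, might look like this:

def is_potential_bib(rect, image_area):
    # illustrative thresholds only -- not taken from the project
    x, y, w, h = cv2.boundingRect(rect)
    area_ratio = (w * h) / float(image_area)
    aspect = w / float(h)
    # keep regions that are neither tiny nor huge and are roughly bib-shaped
    return 0.002 < area_ratio < 0.3 and 0.5 < aspect < 3.0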
Project: ATX    Author: NetEaseGame    | project source | file source
def diff_rect(img1, img2, pos=None):
    """find counters include pos in differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest difference area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1-i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0+w, y0+h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
Project: SSPP-DAN    Author: csehong    | project source | file source
def train_jitter(self, img, w=None, h=None, f=None, s=None):
        if w is None:
            w = random.randint(224, 250)
        if h is None:
            h = w
        if f is None:
            f = random.choice([True, False])
        if s is None:
            s = random.choice([False, 0, 1])
        img = img_proc.resize(img, (w,h))
        img = img_proc.crop_center(img, (224, 224))
        if s is not False:
            img = cv2.GaussianBlur(img, (11, 11), s)
        if f:
            img = img_proc.flip(img)
        return img
Project: slide_captcha_cracker    Author: chxj1992    | project source | file source
def predict():
    response = requests.get(slide_captcha_url)
    base64_image = response.json()['data']['dataUrl']
    base64_image_without_head = base64_image.replace('data:image/png;base64,', '')

    bytes_io = BytesIO(base64.b64decode(base64_image_without_head))
    img = np.array(Image.open(bytes_io).convert('RGB'))

    img_blur = cv2.GaussianBlur(img, (3, 3), 0)
    img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 200)

    operator = get_operator('shape.png')

    (x, y), _ = best_match(img_canny, operator)
    x = x + bias
    print('the position of x is', x)

    buffer = mark(img, x, y)

    return {'value': x, 'image': base64.b64encode(buffer.getbuffer()).decode()}
Project: SynthText    Author: ankush-me    | project source | file source
def drop_shadow(self, alpha, theta, shift, size, op=0.80):
        """
        alpha : alpha layer whose shadow needs to be cast
        theta : [0,2pi] -- the shadow direction
        shift : shift in pixels of the shadow
        size  : size of the GaussianBlur filter
        op    : opacity of the shadow (multiplying factor)

        @return : alpha of the shadow layer
                  (it is assumed that the color is black/white)
        """
        if size%2==0:
            size -= 1
            size = max(1,size)
        shadow = cv.GaussianBlur(alpha,(size,size),0)
        [dx,dy] = shift * np.array([-np.sin(theta), np.cos(theta)])
        shadow = op*sii.shift(shadow, shift=[dx,dy],mode='constant',cval=0)
        return shadow.astype('uint8')
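
A hypothetical call, assuming renderer is an instance of the (unshown) class and alpha is a uint8 alpha mask:

shadow = renderer.drop_shadow(alpha, theta=np.pi / 4, shift=5, size=7, op=0.8)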
Project: FacePoseEstimation    Author: abhisharma7    | project source | file source
def image(self):

        img = cv2.imread(self.image_path)
        img = imutils.resize(img,width=min(800,img.shape[1]))
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray,(21,21),0)
        fullbody = self.HogDescriptor(gray)
        for (x,y,w,h) in fullbody:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)

        faces = self.haar_facedetection(gray)
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = self.haar_eyedetection(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0),2) 
            smile = self.haar_smilecascade(roi_gray)
            for (sx,sy,sw,sh) in smile:
                cv2.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh),(0,255,0),2)
        img = self.dlib_function(img)
        cv2.imshow('img',img)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
Project: edison_developing    Author: vincentchung    | project source | file source
def camera_gesture_trigger():
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    max_area=0

    for i in range(len(contours)):
        cnt=contours[i]
        area = cv2.contourArea(cnt)
        if(area>max_area):
            max_area=area
            ci=i
    cnt=contours[ci]
    hull = cv2.convexHull(cnt)
    moments = cv2.moments(cnt)

    cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
    hull = cv2.convexHull(cnt,returnPoints = False)

    defects = cv2.convexityDefects(cnt,hull)                    

    if defects is not None:         
        if defects.shape[0] >= 5:
            return 1

    return 0
Project: tbotnav    Author: patilnabhi    | project source | file source
def depth_callback(self, ros_image):
        try:
            inImg = self.bridge.imgmsg_to_cv2(ros_image)
        except CvBridgeError, e:
            print e

        inImgarr = np.array(inImg, dtype=np.uint16)
        # inImgarr = cv2.GaussianBlur(inImgarr, (3, 3), 0)
        # cv2.normalize(inImgarr, inImgarr, 0, 1, cv2.NORM_MINMAX) 

        self.outImg, self.num_fingers = self.process_depth_image(inImgarr) 
        # outImg = self.process_depth_image(inImgarr) 
        # rate = rospy.Rate(10)        
        self.num_pub.publish(self.num_fingers)
        # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
        # rate.sleep()

        cv2.imshow("Hand Gesture Recognition", self.outImg)
        cv2.waitKey(3)
Project: HandwritingRecognition    Author: eng-tsmith    | project source | file source
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding.
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret3, img_binary = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

    # invert black = 255
    ret, thresh1 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)

    return thresh1
Project: HandwritingRecognition    Author: eng-tsmith    | project source | file source
def thresholding(img_grey):
    """
    This function creates a binary image using thresholding.
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)
    #
    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)
    #
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)

    return thresh4
Project: tensorlight    Author: bsautermeister    | project source | file source
def gaussian_blur(image, sigma=1.0):
    """Applies a gaussian filter to an image, by processing each channel seperately.
    Notes: we do not use: scipy.ndimage.filters.gaussian_filter(), because it makes
           the image gray-scaled, but with shape [..., 3]
    Parameters
    ----------
    image: ndarray
        The image to filter. Can have any number of channels, because each is
        processed separately.
    sigma: float
        The sigma level, where a larger number means more blur effect.
    Returns
    ----------
    The blurred image.
    """

    # apply equal gaussian and use ksize=0 to auto-compute it from sigma
    blurred_img = cv2.GaussianBlur(image,(0,0), sigma)
    return blurred_img
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def calcDirection(img,blockSize):
    """calculate ridge directions in an image, using gradient method
    return: ridge directions
    """
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    N,M=np.shape(img)
    Vx=np.zeros((N/blockSize,M/blockSize))
    Vy=np.zeros((N/blockSize,M/blockSize))
    for i in xrange(N/blockSize):
        for j in xrange(M/blockSize):
            a=i*blockSize;b=a+blockSize;c=j*blockSize;d=c+blockSize
            Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
            Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)
    gaussianBlurSigma=2;    gaussian_block=5
    Vy=cv2.GaussianBlur(Vy,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    Vx=cv2.GaussianBlur(Vx,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    theta=0.5*np.arctan2(Vy,Vx)            
    return theta
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def calcWlBox(img, blockSize, boxSize):
    resize=5
    img=cv2.resize(img,None,fx=resize,fy=resize,interpolation = cv2.INTER_CUBIC)
    blockSize=blockSize*resize
    boxSize=boxSize*resize
    N,M=img.shape
    wl=100*np.ones((img.shape[0]/boxSize,img.shape[1]/boxSize))    
    ii=-1
    for i in xrange(blockSize/2,N-blockSize/2,boxSize):
        ii += 1
        if ii>=N/boxSize:
            break
        a=i-blockSize/2
        b=a+blockSize
        jj=-1
        for j in xrange(blockSize/2,M-blockSize/2,boxSize):
            jj += 1
            if jj>=M/boxSize:
                break
            c=j-blockSize/2
            d=c+blockSize
            wl[ii,jj]=blkwl(img[a:b,c:d])
    gaussianBlurSigma=4;    gaussian_block=9
    wl=cv2.GaussianBlur(wl,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    return wl/resize
Project: Fingerprint-Recognition    Author: zhangzimou    | project source | file source
def calcDirection(img,blockSize):
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    N,M=np.shape(img)
    Vx=np.zeros((N/blockSize,M/blockSize))
    Vy=np.zeros((N/blockSize,M/blockSize))
    for i in xrange(N/blockSize):
        for j in xrange(M/blockSize):
            a=i*blockSize;b=a+blockSize;c=j*blockSize;d=c+blockSize
            Vy[i,j]=2*np.sum(par_x[a:b,c:d]*par_y[a:b,c:d])
            Vx[i,j]=np.sum(par_y[a:b,c:d]**2-par_x[a:b,c:d]**2)
    gaussianBlurSigma=2;    gaussian_block=5
    Vy=cv2.GaussianBlur(Vy,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    Vx=cv2.GaussianBlur(Vx,(gaussian_block,gaussian_block),gaussianBlurSigma,gaussianBlurSigma)
    theta=0.5*np.arctan2(Vy,Vx)#+np.pi/2            
    return theta
Project: SketchSimplification    Author: La4La    | project source | file source
def addDirt(img):
    rows, cols = img.shape
    Dirt = np.ones(img.shape, np.uint8) * 255
    change = 0
    for i in range(random.randint(0,3)):
        change = 1
        y = random.randint(0,rows-1)
        x = random.randint(0,cols-1)
        dy = random.randint(rows//30,rows//5)
        dx = random.randint(cols//30,cols//5)
        Dirt[y:min(y+dy,rows-1), x:min(x+dx,cols-1)] =  random.randint(215,230)
    k_size = random.randint(max(rows,cols)//18,max(rows,cols)//14) * 2 + 1
    Dirt = cv2.GaussianBlur(Dirt, (k_size, k_size), 0)
    if change:
        img = np.where(img < 230, img, Dirt)
    return img
Project: emojivis    Author: JustinShenk    | project source | file source
def get_face_mask(im, landmarks):
    im = np.zeros(im.shape[:2], dtype=np.float64)

    for landmark in landmarks:
        for group in OVERLAY_POINTS:
            draw_convex_hull(im,
                             landmark[group],
                             color=1)

    im = np.array([im, im, im]).transpose((1, 2, 0))

    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)

    return im

# Draw delaunay triangles
Project: dust_repos    Author: taozhijiang    | project source | file source
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise, then convert to grayscale
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Sobel edge detection in the x direction
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel??", sobel_8u)

    # OTSU binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)    
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("OTSU????", bgimg)

    return bgimg
Project: dust_repos    Author: taozhijiang    | project source | file source
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise, then convert to grayscale
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Sobel edge detection in the x direction
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel??", sobel_8u)

    # OTSU binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)    
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("OTSU????", bgimg)

    return bgimg
Project: ATLeS    Author: liffiton    | project source | file source
def _do_filter(self, frame):
        ''' Process a single frame. '''
        # blur to reduce noise
        frame = cv2.GaussianBlur(frame, (5, 5), 0, borderType=cv2.BORDER_CONSTANT)

        # threshold to find contiguous regions of "bright" pixels
        # ignore all "dark" (<1/8 max) pixels
        max = numpy.max(frame)
        min = numpy.min(frame)
        # if the frame is completely dark, then just return it
        if max == min:
            return frame
        threshold = min + (max - min) / 8
        _, frame = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)

        # filter out single pixels and other noise
        frame = cv2.erode(frame, self._element_shrink)

        # restore and join nearby regions (in case one fish has a skinny middle...)
        frame = cv2.dilate(frame, self._element_grow)

        return frame
Project: spfeas    Author: jgrss    | project source | file source
def scale_rgb(layers, min_max, lidx):

    layers_c = np.empty(layers.shape, dtype='float32')

    # Rescale and blur.
    for li in range(0, 3):

        layer = layers[li]

        layer = np.float32(rescale_intensity(layer,
                                             in_range=(min_max[li][0],
                                                       min_max[li][1]),
                                             out_range=(0, 1)))

        layers_c[lidx[li]] = rescale_intensity(cv2.GaussianBlur(layer,
                                                                ksize=(3, 3),
                                                                sigmaX=3),
                                               in_range=(0, 1),
                                               out_range=(-1, 1))

    return layers_c
Project: Farmbot_GeneralAP    Author: SpongeYao    | project source | file source
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
        # Otsu's thresholding after Gaussian filtering
        tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
        blur = cv2.GaussianBlur(tmp,(5,5),0)
        if arg_binaryMethod== 0:
            ret, thresholdedImg= cv2.threshold(blur.copy() , self.threshold_graylevel, 255 , 0)
        elif arg_binaryMethod == 1:
            ret,thresholdedImg = cv2.threshold(blur.copy(),0 ,255 ,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        elif arg_binaryMethod== 2:
            thresholdedImg = cv2.adaptiveThreshold(blur.copy(),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,5,0)

        result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
        ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        ctrs = filter(lambda x : cv2.contourArea(x) > self.threshold_size , ctrs)

        rects = [[cv2.boundingRect(ctr) , ctr] for ctr in ctrs]

        for rect , cntr in rects:
            cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
        if arg_export_index:
            cv2.imwrite(arg_export_path+ arg_export_filename+'.jpg', result)
        print "Get Contour success"
        return result
Project: qtim_ROP    Author: QTIM-Lab    | project source | file source
def detect_optic_disk(image_rgb, disk_center, out_name):

    scale = 100
    w_sum = cv2.addWeighted(image_rgb, 4, cv2.GaussianBlur(image_rgb, (0, 0), scale / 30), -4, 128)#  * circular_mask + 128 * (1 - circular_mask)

    # Image.fromarray(np.mean(w_sum, axis=2).astype(np.uint8)).save(out_name)

    edges = canny(np.mean(w_sum, axis=2).astype(np.uint8), sigma=1.,
              low_threshold=50.)#, high_threshold=100.)
    result = hough_ellipse(edges, threshold=10, min_size=45, max_size=55)
    result.sort(order='accumulator')

    best = list(result[-1])
    yc, xc, a, b = [int(round(x)) for x in best[1:5]]
    orientation = best[5]

    cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
    image_rgb[cy, cx] = (0, 0, 255)

    Image.fromarray(image_rgb).save(out_name)
Project: qtim_ROP    Author: QTIM-Lab    | project source | file source
def kaggle_BG(img, scale):

    # Create a mask from which the approximate retinal center can be calculated
    guide_mask = create_mask(img)
    retina_center = tuple((np.mean(np.argwhere(guide_mask), axis=0)).astype(np.uint8))[::-1]

    # Generate a circle of the approximate size, centered based on the guide mask
    cf = 1.2
    circular_mask = np.zeros(img.shape)
    cv2.circle(circular_mask, retina_center, int(scale * cf), (1, 1, 1), -1, 8, 0)

    # Compute a weighted sum of the image and its blurred version, then apply the mask
    w_sum = cv2.addWeighted(img, 4, cv2.GaussianBlur(img, (0, 0), scale / 30), -4, 128) * circular_mask + 128 * (1 - circular_mask)
    return w_sum.astype(np.uint8)


# https://github.com/btgraham/SparseConvNet/blob/kaggle_Diabetic_Retinopathy_competition/Data/
# kaggleDiabeticRetinopathy/preprocessImages.py
Project: TikZ    Author: ellisk42    | project source | file source
def analyzeAsymmetric(a,b):
    # a = target
    # b = current
    # if you see a pixel in current that isn't in target, that's really bad
    # if you see a pixel in target that isn't in current, that's not so bad
    import cv2
    kernelSize = blurKernelSize


    a = cv2.GaussianBlur(a,(kernelSize,kernelSize),sigmaX = 0)
    b = cv2.GaussianBlur(b,(kernelSize,kernelSize),sigmaX = 0)

    showImage(a + b)

    d = a - b
    targetBigger = np.sum(d[d > 0]*d[d > 0])
    currentBigger = np.sum(d[d < 0]*d[d < 0])
    print "targetBigger = %f"%targetBigger
    print "currentBigger = %f"%currentBigger

#    showImage(b)

    return currentBigger*2 + targetBigger
Project: Brewereader    Author: ceafdc    | project source | file source
def find_lines(img, acc_threshold=0.25, should_erode=True):
    if len(img.shape) == 3 and img.shape[2] == 3:  # if it's color
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = cv2.adaptiveThreshold(
            img,
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            5,
            2)

    img = cv2.bitwise_not(img)

    # thresh = 127
    # edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
    # edges = cv2.Canny(blur, 500, 500, apertureSize=3)

    if should_erode:
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
        img = cv2.erode(img, element)

    theta = np.pi/2000
    angle_threshold = 2
    horizontal = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[1]),
            min_theta=np.radians(90 - angle_threshold),
            max_theta=np.radians(90 + angle_threshold))
    vertical = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[0]),
            min_theta=np.radians(-angle_threshold),
            max_theta=np.radians(angle_threshold),
            )

    horizontal = list(horizontal) if horizontal is not None else []
    vertical = list(vertical) if vertical is not None else []

    horizontal = [line[0] for line in horizontal]
    vertical = [line[0] for line in vertical]

    horizontal = np.asarray(horizontal)
    vertical = np.asarray(vertical)

    return horizontal, vertical
Project: Python_SelfLearning    Author: fukuit    | project source | file source
def convert_and_count(target, ext):
    for root, dirs, files in os.walk(target):
        for f in files:
            fname, fext = os.path.splitext(f)
            if fext == ext:
                img = cv2.imread(os.path.join(root, f), 0)
                blur = cv2.GaussianBlur(img, (5, 5), 0)
                ret, imgb = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
                cv2.imwrite(os.path.join(root, fname + ".bin.png"), imgb)
                cnt = 0
                for val in imgb.flat:
                    if val == 0:
                        cnt += 1
                ratio = cnt / img.size
                msg = "%s:\t%.5f" % (f, ratio)
                print(msg)
Project: Shoe-Shape-Classifier    Author: jrzaurin    | project source | file source
def threshold_img(img):
    """
    Simple wrapper function for cv2.threshold().
    """

    is_color = len(img.shape) == 3
    is_grey  = len(img.shape) == 2

    t = threshold_value(img)

    if is_color:
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    elif is_grey:
        gray = img.copy()

    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    (_, thresh) = cv2.threshold(blurred, t*255, 1, cv2.THRESH_BINARY_INV)

    return thresh
Project: Shoe-Shape-Classifier    Author: jrzaurin    | project source | file source
def threshold_img(img):
    """
    Simple wrapper function for cv2.threshold().
    """

    is_color = len(img.shape) == 3
    is_grey  = len(img.shape) == 2

    t = threshold_value(img)

    if is_color:
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    elif is_grey:
        gray = img.copy()

    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    (_, thresh) = cv2.threshold(blurred, t*255, 1, cv2.THRESH_BINARY_INV)

    return thresh