Python cv2 module: medianBlur() example source code

We extracted the following 49 code examples from open-source Python projects to illustrate how to use cv2.medianBlur().
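
Before the project excerpts, here is a minimal standalone sketch of the basic call pattern (not taken from any of the projects below; the file names and the kernel size of 5 are placeholder choices). cv2.medianBlur(src, ksize) expects ksize to be an odd integer greater than 1 and replaces each pixel with the median of its ksize x ksize neighborhood, which makes it effective against salt-and-pepper noise.

import cv2

# Load an image (placeholder path) and suppress salt-and-pepper noise
# with a 5x5 median filter; ksize must be odd and > 1 (3, 5, 7, ...).
img = cv2.imread('input.jpg')
if img is None:
    raise IOError('could not read input.jpg')

denoised = cv2.medianBlur(img, 5)
cv2.imwrite('denoised.jpg', denoised)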

Project: reconstruction    Author: microelly2    | Project Source | File Source
def execute_Threshold(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    # img = cv2.imread('dave.jpg',0) ??
    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
Project: Artificial-Potential-Field    Author: vampcoder    | Project Source | File Source
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 3000 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    #cv2.imshow('arr', arr)
    #k = cv2.waitKey(0)
    return arr
Project: SBB4-damage-tracker    Author: whorn    | Project Source | File Source
def createTrainingData(filename,time_start,time_stop):
    vidcap = cv2.VideoCapture(filename)
    try:
        os.makedirs("trainingdata_"+filename)
    except OSError:
        pass
    os.chdir("trainingdata_"+filename)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    for time in range(time_start,time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC,time*1000)
        success,image = vidcap.read()
        image = cv2.medianBlur(image,7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430,220:300]
        p2 = resized[370:430,520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        cv2.imwrite('p1_'+str(time)+".png",p1)
        cv2.imwrite('p2_'+str(time)+".png",p2)
    os.chdir("..")
Project: esys-pbi    Author: fsxfreak    | Project Source | File Source
def dif_gaus(image, lower, upper):
        lower, upper = int(lower-1), int(upper-1)
        lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0)
        upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0)
        # upper +=50
        # lower +=50
        dif = lower-upper
        # dif *= .1
        # dif = cv2.medianBlur(dif,3)
        # dif = 255-dif
        dif = cv2.inRange(dif, np.asarray(200),np.asarray(256))
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
        dif = cv2.dilate(dif, kernel, iterations=2)
        dif = cv2.erode(dif, kernel, iterations=1)
        # dif = cv2.max(image,dif)
        # dif = cv2.dilate(dif, kernel, iterations=1)
        return dif
Project: CE264-Computer_Vision    Author: RobinCPC    | Project Source | File Source
def skin_detect(self, raw_yrb, img_src):
        # use median blurring to remove signal noise in YCRCB domain
        raw_yrb = cv2.medianBlur(raw_yrb, 5)
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)

        # morphological transform to remove unwanted part
        kernel = np.ones((5, 5), np.uint8)
        #mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
        mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)

        res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
        #res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7,21)

        return res_skin


# Do background subtraction with some filtering
Project: checkmymeat    Author: kendricktan    | Project Source | File Source
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
Project: Yugioh-bot    Author: will7200    | Project Source | File Source
def read_captured_circles(self):
        img = cv2.cvtColor(self.query, cv2.COLOR_BGR2GRAY)
        img = cv2.medianBlur(img, 7)
        cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                                   param1=50, param2=30, minRadius=20, maxRadius=50)
        if circles is None:
            return
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            if i[1] < 400:
                continue
            self.circlePoints.append((i[0], i[1]))
        if self._debug:
            self.draw_circles(circles, cimg)
Project: Artificial-Potential-Field    Author: vampcoder    | Project Source | File Source
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        # the original condition "> 35000 and < 15000" can never be true; a 3500-25000 range is assumed here, matching the sibling classify() variants
        if cv2.contourArea(cnt) > 3500 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Project: Artificial-Potential-Field    Author: vampcoder    | Project Source | File Source
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 3600 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Project: Pacbot    Author: HarvardURC    | Project Source | File Source
def _process_image(self, image):
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hsv = cv2.medianBlur(hsv, 5)
        draw_col = (0,0,255)

        p1 = (self.edges['l'], self.edges['d'])
        p2 = (self.edges['r'], self.edges['u'])
        cv2.rectangle(hsv, p1, p2, draw_col)

        vert_spacing = (self.edges['r'] - self.edges['l'])/float(len(grid))
        for i in range(1, len(grid)):
            x_pos = int(self.edges['l'] + i*vert_spacing)
            p1 = (x_pos, self.edges['d'])
            p2 = (x_pos, self.edges['u'])
            cv2.line(hsv, p1, p2, draw_col)

        horiz_spacing = (self.edges['d'] - self.edges['u'])/float(len(grid[0]))
        for i in range(1, len(grid[0])):
            y_pos = int(self.edges['u'] + i*horiz_spacing)
            p1 = (self.edges['l'], y_pos)
            p2 = (self.edges['r'], y_pos)
            cv2.line(hsv, p1, p2, draw_col)

        return hsv
Project: Pacbot    Author: HarvardURC    | Project Source | File Source
def _detect_bot(self, hsv_image):
        BOT_MIN = np.array([28,8,100], np.uint8)
        BOT_MAX = np.array([32,255,255], np.uint8)

        thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
        thresholded_image = cv2.medianBlur(thresholded_image, 15)

        _, contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            (bot_x, bot_y) = (-1000,-1000)
        else:
            bot = contours[0]
            M = cv2.moments(bot)
            if len(bot) > 2:
                bot_x = int(M['m10']/M['m00'])
                bot_y = int(M['m01']/M['m00'])
            else:
                (bot_x, bot_y) = (-1000,-1000)

        return thresholded_image, (bot_x, bot_y)
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project Source | File Source
def process(img):
    img=cv2.medianBlur(img,5)
    kernel=np.ones((3,3),np.uint8)

    #img=cv2.erode(img,kernel,iterations = 1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations = 1)
    erosion = cv2.erode(dilation, element1, iterations = 1)
    dilation2 = cv2.dilate(erosion, element2,iterations = 3)
    #img=cv2.dilate(img,kernel,iterations = 1)
    #img=cv2.Canny(img,100,200)
    return dilation2
Project: smart-cam    Author: smart-cam    | Project Source | File Source
def frameBlurrer(writer_blurrer_filename_Queue, blur_to_motiondetector_blurred_Queue):
    while True:
        BLURS = list()
        FRAMES = list()
        filename = writer_blurrer_filename_Queue.get()

        t1 = time.time()
        camera = cv2.VideoCapture(filename)
        for counter in xrange(0, FRAMES_PER_CLIP):
            ret, frame = camera.read()
            FRAMES.append(frame)
        camera.release()

        while len(FRAMES) > 0:
            frame = FRAMES.pop(0)
            blurred = cv2.medianBlur(frame, 9)
            BLURS.append(blurred)

        print "Blurred", time.time() - t1

        # Sending blurs to motion detector
        blur_to_motiondetector_blurred_Queue.put((filename, BLURS))
        del filename
        del BLURS
    return
Project: sia-cog    Author: deepakkumar1984    | Project Source | File Source
def extracttext(imgpath, preprocess):
    if imgpath.startswith('http://') or imgpath.startswith('https://') or imgpath.startswith('ftp://'):
        image = url_to_image(imgpath)
    else:
        image = cv2.imread(imgpath)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if preprocess == "thresh":
        gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    elif preprocess == "blur":
        gray = cv2.medianBlur(gray, 3)

    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    text = pytesseract.image_to_string(Image.open(filename))

    os.remove(filename)
    return {"text": text}
Project: cozmo_beyond    Author: PeterMitrano    | Project Source | File Source
def __blur(src, type, radius):
        """Softens an image using one of several filters.
        Args:
            src: The source mat (numpy.ndarray).
            type: The blurType to perform represented as an int.
            radius: The radius for the blur as a float.
        Returns:
            A numpy.ndarray that has been blurred.
        """
        if(type is BlurType.Box_Blur):
            ksize = int(2 * round(radius) + 1)
            return cv2.blur(src, (ksize, ksize))
        elif(type is BlurType.Gaussian_Blur):
            ksize = int(6 * round(radius) + 1)
            return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
        elif(type is BlurType.Median_Filter):
            ksize = int(2 * round(radius) + 1)
            return cv2.medianBlur(src, ksize)
        else:
            return cv2.bilateralFilter(src, -1, round(radius), round(radius))
Project: PicFilter    Author: dhuadaar    | Project Source | File Source
def render(self,frame):
        img_rgb = frame
        # number of downscaling steps
        numDownSamples = 2
        # number of bilateral filtering steps
        numBilateralFilters = 7
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
Project: PicFilter    Author: dhuadaar    | Project Source | File Source
def render(self,frame):
        canvas = cv2.imread("pen.jpg", cv2.CV_8UC1)
        img_rgb = frame
        # number of downscaling steps
        numDownSamples = 2
        # number of bilateral filtering steps
        numBilateralFilters = 3
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in xrange(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 3)

        # upsample image to original size
        for _ in xrange(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 3)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        return  cv2.multiply(cv2.medianBlur(img_edge,7), canvas, scale=1./256)
Project: pybot    Author: spillai    | Project Source | File Source
def median_blur(im, size=3): 
    return cv2.medianBlur(im, size)
Project: deep-prior    Author: moberweger    | Project Source | File Source
def getDepth(self):
        """
        Return a median smoothed depth image
        :return: depth data as numpy array
        """

        if self.mirror:
            depth = dsc.getDepthMap()[:, ::-1]
        else:
            depth = dsc.getDepthMap()
        depth = cv2.medianBlur(depth, 3)
        return (numpy.count_nonzero(depth) != 0), numpy.asarray(depth, numpy.float32)
Project: esys-pbi    Author: fsxfreak    | Project Source | File Source
def equalize(image, image_lower=0.0, image_upper=255.0):
    image_lower = int(image_lower*2)/2
    image_lower +=1
    image_lower = max(3,image_lower)
    mean = cv2.medianBlur(image,255)
    image = image - (mean-100)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    # cv2.dilate(image, kernel, image, iterations=1)
    return image
Project: CE264-Computer_Vision    Author: RobinCPC    | Project Source | File Source
def background_subtract(self, img_src):
        fgmask = self.fgbg.apply(cv2.GaussianBlur(img_src, (25, 25), 0))
        kernel = np.ones((5, 5), np.uint8)
        fgmask = cv2.dilate(fgmask, kernel, iterations=2)
        #fgmask = self.fgbg.apply(cv2.medianBlur(img_src, 11))
        org_fg = cv2.bitwise_and(img_src, img_src, mask=fgmask)
        return org_fg

# Update Position of ROI
Project: Magic-Pixel    Author: zhwhong    | Project Source | File Source
def medianBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)
    blur = cv2.medianBlur(img, 3)
    # cv2.imshow(dstpath, img)
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1,2,1),plt.imshow(img,'gray')
    plt.subplot(1,2,2),plt.imshow(blur,'gray')
    plt.show()

Project: cv-lane    Author: kendricktan    | Project Source | File Source
def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours
Project: checkmymeat    Author: kendricktan    | Project Source | File Source
def predict(url):
    global model, COOKED_PHRASES, RAW_PHRASES   
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    # Get features
    features = describe(image, mask)

    # Predict it
    result = model.predict([features])
    probability = model.predict_proba([features])[0][result][0]        
    state = le.inverse_transform(result)[0]

    phrase = ''

    if 'cook' in state:
        phrase = COOKED_PHRASES[int(random.random()*len(COOKED_PHRASES))]
    elif 'raw' in state:
        phrase = RAW_PHRASES[int(random.random()*len(RAW_PHRASES))]

    return {'type': state, 'confidence': probability, 'phrase': phrase}
Project: BlurDetection2    Author: WillBrennan    | Project Source | File Source
def pretty_blur_map(blur_map, sigma=5):
    abs_image = numpy.log(numpy.abs(blur_map).astype(numpy.float32))
    cv2.blur(abs_image, (sigma, sigma))
    return cv2.medianBlur(abs_image, sigma)
Project: doc2text    Author: jlsutherland    | Project Source | File Source
def reduce_noise_raw(im):
    bilat = cv2.bilateralFilter(im, 9, 75, 75)
    blur = cv2.medianBlur(bilat, 5)
    return blur
Project: pygeotools    Author: dshean    | Project Source | File Source
def median_fltr_opencv(dem, size=3, iterations=1):
    """OpenCV median filter
    """
    import cv2
    dem = malib.checkma(dem)
    if size > 5:
        print("Need to implement iteration")
    n = 0
    out = dem
    while n <= iterations:
        dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
        out = np.ma.fix_invalid(dem_cv)
        out.set_fill_value(dem.fill_value)
        n += 1
    return out
Project: diddyborg    Author: piborg    | Project Source | File Source
def ProcessImage(self, image):
        global autoMode
        # Get the red section of the image
        image = cv2.medianBlur(image, 5)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
        red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
        # Find the contours
        contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Go through each contour
        foundArea = -1
        foundX = -1
        foundY = -1
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            cx = x + (w / 2)
            cy = y + (h / 2)
            area = w * h
            if foundArea < area:
                foundArea = area
                foundX = cx
                foundY = cy
        if foundArea > 0:
            ball = [foundX, foundY, foundArea]
        else:
            ball = None
        # Set drives or report ball status
        if autoMode:
            self.SetSpeedFromBall(ball)
        else:
            if ball:
                print 'Ball at %d,%d (%d)' % (foundX, foundY, foundArea)
            else:
                print 'No ball'

    # Set the motor speed from the ball position
Project: diddyborg    Author: piborg    | Project Source | File Source
def ProcessImage(self, image):
        # Get the red section of the image
        image = cv2.medianBlur(image, 5)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
        red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
        # Find the contours
        contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Go through each contour
        foundArea = -1
        foundX = -1
        foundY = -1
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            cx = x + (w / 2)
            cy = y + (h / 2)
            area = w * h
            if foundArea < area:
                foundArea = area
                foundX = cx
                foundY = cy
        if foundArea > 0:
            ball = [foundX, foundY, foundArea]
        else:
            ball = None
        # Set drives or report ball status
        self.SetSpeedFromBall(ball)

    # Set the motor speed from the ball position
Project: dream2016_dm    Author: lishen    | Project Source | File Source
def process(self, img, median_filtering=True, blur_kn_size=3, 
                artif_suppression=True, low_int_threshold=.05, kernel_size=15,
                pect_removal=False, high_int_threshold=.8, **pect_kwargs):
        '''Perform multi-stage preprocessing on the input image
        Args:
            blur_kn_size ([int]): kernel size for median blurring.
            low_int_threshold ([int]): cutoff used in artifacts suppression.
            high_int_threshold ([int]): cutoff used in pectoral muscle removal.
        Returns:
            a tuple of (processed_image, color_image_with_boundary). If 
            pectoral removal was not called, the color image is None.
        '''
        img_proc = img.copy()
        if median_filtering:
            img_proc = cv2.medianBlur(img_proc, blur_kn_size)
        if artif_suppression:
            img_proc, mask_ = self.suppress_artifacts(
                img_proc, global_threshold=low_int_threshold, 
                kernel_size=kernel_size)
        else:
            _, mask_ = self.suppress_artifacts(img_proc)
        if pect_removal:
            img_proc, img_col = self.remove_pectoral(
                img_proc, mask_, high_int_threshold=high_int_threshold, 
                **pect_kwargs)
        else:
            img_col = None

        return (img_proc, img_col)
Project: Pacbot    Author: HarvardURC    | Project Source | File Source
def __process_image(self, image):

        # Our operations on the frame come here
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hsv = cv2.medianBlur(hsv, 5)

        draw_col = (0,0,255)

        p1 = (LEFT_EDGE, BOTTOM_EDGE)
        p2 = (RIGHT_EDGE, TOP_EDGE)
        cv2.rectangle(hsv, p1, p2, draw_col) #bounding rectangle

        vert_spacing = (RIGHT_EDGE - LEFT_EDGE)/31. # vertical lines
        for i in range(1, 31): 
            x_pos = int(LEFT_EDGE + i*vert_spacing)
            p1 = (x_pos, BOTTOM_EDGE)
            p2 = (x_pos, TOP_EDGE)
            cv2.line(hsv, p1, p2, draw_col)

        horiz_spacing = (BOTTOM_EDGE - TOP_EDGE)/28. # horizontal lines
        for i in range(1, 28): 
            y_pos = int(TOP_EDGE + i*horiz_spacing)
            p1 = (LEFT_EDGE, y_pos)
            p2 = (RIGHT_EDGE, y_pos)
            cv2.line(hsv, p1, p2, draw_col)


        # cv2.imshow('Grid', hsv)
        # cv2.waitKey(1)
        # box around target pixel for testing
        # pt = (350, 600)
        # cv2.circle(hsv, pt, 3, draw_col, thickness =1)
        # print hsv[600][350]


        return hsv
Project: Pacbot    Author: HarvardURC    | Project Source | File Source
def __detect_bot(self, hsv_image):

        # Experimentally determined LED thresholds
        BOT_MIN = np.array([28,8,100], np.uint8)
        BOT_MAX = np.array([32,255,255], np.uint8)

        thresholded_image = cv2.inRange(hsv_image, BOT_MIN, BOT_MAX)
        thresholded_image = cv2.medianBlur(thresholded_image, 15)

        # cv2.imshow('Yellow Tresh', thresholded_image)
        # cv2.waitKey(1)

        contours, hierarchy = cv2.findContours(thresholded_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            (bot_x, bot_y) = (-1000,-1000)
        else:       
            bot = contours[0]
            M = cv2.moments(bot)
            if len(bot) > 2:
                bot_x = int(M['m10']/M['m00'])
                bot_y = int(M['m01']/M['m00'])
            else:
                bot_x = self.current_location[0]
                bot_y = self.current_location[1]

        return thresholded_image, (bot_x, bot_y)
Project: hand-gesture-recognition-opencv    Author: mahaveerverma    | Project Source | File Source
def hand_threshold(frame_in,hand_hist):
    frame_in=cv2.medianBlur(frame_in,3)
    hsv=cv2.cvtColor(frame_in,cv2.COLOR_BGR2HSV)
    hsv[0:int(cap_region_y_end*hsv.shape[0]),0:int(cap_region_x_begin*hsv.shape[1])]=0 # Right half screen only
    hsv[int(cap_region_y_end*hsv.shape[0]):hsv.shape[0],0:hsv.shape[1]]=0
    back_projection = cv2.calcBackProject([hsv], [0,1],hand_hist, [00,180,0,256], 1)
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_elem_size,morph_elem_size))
    cv2.filter2D(back_projection, -1, disc, back_projection)
    back_projection=cv2.GaussianBlur(back_projection,(gaussian_ksize,gaussian_ksize), gaussian_sigma)
    back_projection=cv2.medianBlur(back_projection,median_ksize)
    ret, thresh = cv2.threshold(back_projection, hsv_thresh_lower, 255, 0)

    return thresh

# 3. Find hand contour
Project: AtmosphericCorrection    Author: y-iikura    | Project Source | File Source
def xmedian(ref,mwid):
    temp=np.isnan(ref)
    tmean=np.nanmean(ref)
    ref[temp]=tmean
    ref2=cv2.blur(ref,(mwid,mwid))
    ref[temp]=ref2[temp]
    tempx=np.uint8(255*ref)
    return cv2.medianBlur(tempx,mwid)/255.0
Project: AtmosphericCorrection    Author: y-iikura    | Project Source | File Source
def ymedian0(aero,cls,mwid):
    temp=np.isnan(aero)
    tmean=np.nanmean(aero)
    aero[temp]=tmean
    aero2=cv2.blur(aero,(mwid,mwid))
    aero[temp]=aero2[temp]
    tempx=np.uint8(100*aero)
    aerox=cv2.medianBlur(tempx,mwid)/100.0
    return aerox
Project: AtmosphericCorrection    Author: y-iikura    | Project Source | File Source
def ymedian(aero,cls,mwid,twid):
    temp=np.isnan(aero)
    tmean=np.nanmean(aero)
    aero[temp]=tmean
    aero2=cv2.blur(aero,(mwid,mwid))
    aero[temp]=aero2[temp]
    # 4/28/2016
    #tempx=np.uint8(255*aero)
    tempx=np.uint8(100*aero)
    #aerox=cv2.medianBlur(tempx,mwid)/255.0
    aerox=cv2.medianBlur(tempx,mwid)/100.0
    ptemp=np.where(np.abs(aero-aerox) > twid)
    cls[ptemp]=-1
    return aerox
Project: GidroGraf-Sirius    Author: alf3r    | Project Source | File Source
def blur(self):
        px = 5
        self.data = cv2.blur(self.data, (px, px))
        # self.data = cv2.medianBlur(self.data, px)
Project: deep-prior-pp    Author: moberweger    | Project Source | File Source
def getDepth(self):
        """
        Return a median smoothed depth image
        :return: depth data as numpy array
        """

        if self.mirror:
            depth = dsc.getDepthMap()[:, ::-1]
        else:
            depth = dsc.getDepthMap()
        depth = cv2.medianBlur(depth, 3)
        return (numpy.count_nonzero(depth) != 0), numpy.asarray(depth, numpy.float32)
Project: UAV-and-TrueOrtho    Author: LeonChen66    | Project Source | File Source
def main():
    data = pd.read_csv(
        'Leon_group1_densified_point_cloud.xyz',
        names=['X', 'Y', 'Z', 'C_R','C_G','C_B'],
        delim_whitespace=True)
    # Calculate Geotiff information
    Auto = True

    # If it is auto
    if Auto == True:
        # spacing could be changed
        spacing = 1.6*get_space(data)

        w = int((data.X.max() - data.X.min()) / spacing)
        h = int((data.Y.max() - data.Y.min()) / spacing)
        affine_par = [spacing,0,0,-spacing,data.X.min(),data.Y.max()]

    else:
        affine_name = ''
        affine_par = np.loadtxt(affine_name)   # input the affine name
        h = 1792
        w = 1053

    print(affine_par)
    print(h,w)
    # Generate DEM
    ortho = GEM_Dsm(data, h, w, 3, 0.15,affine_par)
    # save to tif
    ortho = ortho.astype(np.uint8)
    # ortho = cv2.medianBlur(ortho, 3)
    cv2.imwrite('ortho.tif',ortho)

    array2Raster(ortho,affine_par,'test.tif')
Project: Smart-Car    Author: jimchenhub    | Project Source | File Source
def getDisparity(stereo, img1, img2, mapx1, mapy1, mapx2, mapy2):
    dst1 = cv2.remap(img1, mapx1, mapy1, cv2.INTER_LINEAR)
    dst2 = cv2.remap(img2, mapx2, mapy2, cv2.INTER_LINEAR)
    gray1 = cv2.cvtColor(dst1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(dst2, cv2.COLOR_BGR2GRAY)
    disparity = stereo.compute(gray1, gray2)/16
    # disparity = cv2.medianBlur(disparity, 5)
    return disparity
Project: NGImageProcessor    Author: artzers    | Project Source | File Source
def Median(self, img, size):
        dImg = cv2.medianBlur(img, size)
        return dImg
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project Source | File Source
def preprocess(img):
    '''Image preprocessing'''
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #img=cv2.GaussianBlur(img,(3,3),0)
    img=cv2.medianBlur(img,5)
    img=cv2.equalizeHist(img)
    return img
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project Source | File Source
def process(img):
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    gau=cv2.GaussianBlur(gray,(5,5),0)
    ret,thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med=cv2.medianBlur(thre,5)
    canny=cv2.Canny(thre,100,200)
    #sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize = 3)
    dilation=cv2.dilate(canny,element2,iterations = 1)
    dst=cv2.erode(dilation, element1, iterations = 1)
    return dst
Project: Stronghold-2016-Vision    Author: team4099    | Project Source | File Source
def threshold_image_for_tape(image):
    """
    Thresholds image for reflective tape with light shined on it. This means it
    looks for pixels that are almost white, makes them white, and makes
    everything else black.

    Parameters:
        :param: `image` - the source image to threshold from
    """
    orig_image = numpy.copy(image)
    # print orig_image.size
    orig_image = cv2.medianBlur(orig_image, 3)
    # orig_image[orig_image > 100] = 255
    # return orig_image[orig_image > 100]
    height, width = orig_image.shape[0], orig_image.shape[1]
    eight_bit_image = numpy.zeros((height, width, 1), numpy.uint8)
    cv2.inRange(orig_image,
                (B_RANGE[0], G_RANGE[0], R_RANGE[0], 0),
                (B_RANGE[1], G_RANGE[1], R_RANGE[1], 100),
                eight_bit_image)
    # # eight_bit_image = cv2.adaptiveThreshold(orig_image,
    # #                             255,
    # #                             cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    # #                             cv2.THRESH_BINARY,
    # #                             8,
    # #                             0)
    # cv2.medianBlur(eight_bit_image, 9)
    return eight_bit_image
Project: Simple-deCAPTCHA    Author: BLKStone    | Project Source | File Source
def th2(self,img):
        # adaptive thresholding
        # optional pre-smoothing (left disabled):
        # median = cv2.medianBlur(thresh,3)
        # img_blur = cv2.GaussianBlur(img_gray, (m_blurBlock,m_blurBlock), 0)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)     
        thresh = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
            cv2.THRESH_BINARY, 11, 19)
        return thresh

Project: suiron    Author: kendricktan    | Project Source | File Source
def get_median_blur(gray_frame):
    return cv2.medianBlur(gray_frame, 5)

# Canny edge detection
Project: SBB4-damage-tracker    Author: whorn    | Project Source | File Source
def videoToImageArray(filename,time_start,time_stop):
    vidcap = cv2.VideoCapture(filename)
    pictures = [[],[]]
    for time in range(time_start,time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC,time*1000)      # just cue to 20 sec. position
        success,image = vidcap.read()
        image = cv2.medianBlur(image,7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430,220:300]
        p2 = resized[370:430,520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        pictures[0].append(p1)
        pictures[1].append(p2)
    return pictures
Project: Sign-Language-Recognition    Author: Anmol-Singh-Jaggi    | Project Source | File Source
def make_background_black(frame):
    """
    Makes everything apart from the main object of interest to be black in color.
    """
    print("Making background black...")

    # Convert from RGB to HSV
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Prepare the first mask.
    # Tuned parameters to match the skin color of the input images...
    lower_boundary = np.array([0, 40, 30], dtype="uint8")
    upper_boundary = np.array([43, 255, 254], dtype="uint8")
    skin_mask = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Apply a series of erosions and dilations to the mask using an
    # elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)

    # Prepare the second mask
    lower_boundary = np.array([170, 80, 30], dtype="uint8")
    upper_boundary = np.array([180, 255, 250], dtype="uint8")
    skin_mask2 = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Combine the effect of both the masks to create the final frame.
    skin_mask = cv2.addWeighted(skin_mask, 0.5, skin_mask2, 0.5, 0.0)
    # Blur the mask to help remove noise.
    # skin_mask = cv2.medianBlur(skin_mask, 5)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    frame = cv2.addWeighted(frame, 1.5, frame_skin, -0.5, 0)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)

    print("Done!")
    return frame_skin
Project: 5_FingerPrint_mosaic    Author: Bodyrelife    | Project Source | File Source
def pre_process_debug(image):
    cv2.imshow("Image", image)
    image_eqhist = equalize_hist(image)
    cv2.imshow("Equalize Hist", image_eqhist)
    image_blur = cv2.medianBlur(image, 5)
    cv2.imshow("Blur", image_blur)
    image_blur_eqhist = equalize_hist(image_blur)
    cv2.imshow("Blur + Equalize Hist", image_blur_eqhist)
    image_eqhist_blur = cv2.medianBlur(image_eqhist, 5)
    cv2.imshow("Equalize Hist + Blur", image_eqhist_blur)
    cv2.waitKey(0)