Python scipy.ndimage.filters module: median_filter() example source code

We have extracted the following 18 code examples from open-source Python projects to illustrate how to use scipy.ndimage.filters.median_filter().
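
A minimal, self-contained usage sketch (not taken from any of the projects below) showing the two most common call patterns of median_filter, a scalar size and an explicit footprint:

import numpy as np
from scipy.ndimage.filters import median_filter  # in current SciPy also available as scipy.ndimage.median_filter

img = np.random.rand(64, 64)
img[10, 10] = 50.0                     # isolated outlier ("salt" pixel)

# 3x3 median: each pixel is replaced by the median of its neighbourhood,
# which suppresses isolated outliers while largely preserving edges
smoothed = median_filter(img, size=3)

# a non-square neighbourhood can be passed as a boolean footprint
cross = np.array([[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]], dtype=bool)
smoothed_cross = median_filter(img, footprint=cross)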

Project: imgProcessor    Author: radjkarl    | Project source | File source
def flatFieldFromCloseDistance(imgs, bg_imgs=None):
    '''
    Average multiple images of a homogeneous device
    imaged directly in front of the camera lens.

    If [bg_imgs] are not given, the background level is extracted
        from 1% of the cumulative intensity distribution
        of the averaged [imgs]

    This measurement method is referred to as 'Method A' in
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    '''
    img = imgAverage(imgs)
    bg = getBackground2(bg_imgs, img)
    img -= bg
    img = toGray(img)
    mx = median_filter(img[::10, ::10], 3).max()
    img /= mx
    return img
Project: imgProcessor    Author: radjkarl    | Project source | File source
def sensitivity(imgs, bg=None):
    '''
    Extract pixel sensitivity from a set of homogeneously illuminated images

    This method is detailed in Section 5 of:
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---

    '''
    bg = getBackground(bg)
    for n, i in enumerate(imgs):
        i = imread(i, dtype=float)
        i -= bg
        smooth = fastMean(median_filter(i, 3))
        i /= smooth
        if n == 0:
            out = i
        else:
            out += i
    out /= (n + 1)
    return out
Project: dataArtist    Author: radjkarl    | Project source | File source
def activate(self):
        image = self.display.widget.image

        self._setupMenu()

        scale = mn = 0
        # TRANSFORM TO UINT8:
        orig_dtype = image.dtype
        if orig_dtype != np.uint8:
            if self.pConvMethod.value() == 'clip':
                image = np.clip(image, 0, 255).astype(np.uint8)
#                 image = [np.uint8(np.clip(i, 0, 255)) for i in image]
            else:  # scale
                med = median_filter(image[0], 3)
                mn = np.min(med)
                image -= mn  # set min to 0
                scale = np.max(med) / 255
                image /= scale
                image = np.clip(image, 0, 255).astype(np.uint8)

        self.startThread(
            lambda image=image, scale=scale, mn=mn, orig_dtype=orig_dtype:
                self._process(image, scale, mn, orig_dtype), self._processDone)
Project: pygeotools    Author: dshean    | Project source | File source
def median_fltr(dem, fsize=7, origmask=False):
    """Scipy.ndimage median filter

    Does not properly handle NaN
    """
    print("Applying median filter with size %s" % fsize)
    from scipy.ndimage.filters import median_filter
    dem_filt_med = median_filter(dem.filled(np.nan), fsize)
    #Now mask all nans
    out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value)
    if origmask:
        out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value)
    return out
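
The docstring above notes that the ndimage filter does not properly handle NaN. A slow but NaN-aware alternative (an illustrative sketch, not part of pygeotools) can be built from scipy.ndimage.generic_filter and np.nanmedian:

import numpy as np
from scipy.ndimage import generic_filter

def median_fltr_nan(dem, fsize=7):
    # hypothetical NaN-aware median: np.nanmedian over each window (much slower)
    arr = np.ma.filled(dem, np.nan).astype(float)
    filt = generic_filter(arr, np.nanmedian, size=fsize)
    return np.ma.fix_invalid(filt, copy=False)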

#Use the OpenCV median filter - still propagates NaN
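
The comment above refers to pygeotools' OpenCV-based variant, which did not make it into this extract. A hypothetical sketch of such a wrapper (assuming cv2 is installed; cv2.medianBlur accepts float32 input only for kernel sizes 3 and 5) might look like:

import numpy as np
import cv2

def median_fltr_opencv_sketch(dem, fsize=5):
    # hypothetical wrapper, not the project's actual implementation
    arr = np.ma.filled(dem, np.nan).astype(np.float32)
    filt = cv2.medianBlur(arr, fsize)  # NaN cells still propagate through the filter
    return np.ma.fix_invalid(filt, copy=False)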
Project: semantic-segmentation    Author: albertbuchard    | Project source | File source
def MAD(x, n=3, fil=1):
        # robust sigma estimate: 1.48 * median absolute deviation from a
        # local (n x n) median (fil=1) or from the global median (otherwise)
        if fil == 1:
            meda = med.median_filter(x, size=(n, n))
        else:
            meda = np.median(x)
        medfil = np.abs(x - meda)
        sh = np.shape(x)
        sigma = 1.48 * np.median(medfil)
        return sigma
Project: srep    Author: Answeror    | Project source | File source
def __call__(self, data, num_semg_row, num_semg_col, **kargs):
        return np.array([median_filter(image, 3).ravel() for image
                         in data.reshape(-1, num_semg_row, num_semg_col)])
Project: srep    Author: Answeror    | Project source | File source
def _csl_cut(data, framerate):
    window = int(np.round(150 * framerate / 2048))
    data = data[:len(data) // window * window].reshape(-1, 150, data.shape[1])
    rms = np.sqrt(np.mean(np.square(data), axis=1))
    rms = [median_filter(image, 3).ravel() for image in rms.reshape(-1, 24, 7)]
    rms = np.mean(rms, axis=1)
    threshold = np.mean(rms)
    mask = rms > threshold
    for i in range(1, len(mask) - 1):
        if not mask[i] and mask[i - 1] and mask[i + 1]:
            mask[i] = True
    from .. import utils
    begin, end = max(utils.continuous_segments(mask),
                     key=lambda s: (mask[s[0]], s[1] - s[0]))
    return begin * window, end * window
Project: srep    Author: Answeror    | Project source | File source
def _get_amp(data, framerate, num_semg_row, num_semg_col):
    data = np.abs(data)
    data = np.transpose([lowpass(ch, 2, framerate, 4, zero_phase=True) for ch in data.T])
    return [median_filter(image, 3).mean() for image in data.reshape(-1, num_semg_row, num_semg_col)]
Project: studyforrest-data-phase2    Author: psychoinformatics-de    | Project source | File source
def preprocess_eyegaze(eyegaze, blink_margin=200, filter_width=40):
    from scipy.ndimage.morphology import binary_dilation
    from scipy.ndimage.filters import median_filter

    mask = binary_dilation(np.isnan(eyegaze['x']), iterations=blink_margin)
    # filter x and y coordinate separately
    eyegaze['x'] = median_filter(eyegaze['x'], size=filter_width)
    eyegaze['y'] = median_filter(eyegaze['y'], size=filter_width)
    # blank blink margin
    eyegaze['x'][mask] = np.nan
    eyegaze['y'][mask] = np.nan
    return eyegaze
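
A usage sketch for the function above; the structured-array layout with 'x' and 'y' fields is inferred from the indexing and is not taken from the project:

import numpy as np

n = 1000
gaze = np.zeros(n, dtype=[('x', float), ('y', float)])
gaze['x'] = np.linspace(0, 1280, n)
gaze['y'] = 512 + 50 * np.sin(np.linspace(0, 10, n))
gaze['x'][400:420] = np.nan              # simulated blink
gaze['y'][400:420] = np.nan

clean = preprocess_eyegaze(gaze, blink_margin=50, filter_width=11)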
Project: mirapie    Author: Chutlhu    | Project source | File source
def par_kernel(Pj):
        return median_filter(Pj, footprint=self.proximity_kernel)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def getBackgroundLevel(img):
    # seems to be the best one according to the no-reference bg comparison
    # done for the SNR article in BEDRICH2016 JPV
    # return median_filter(img[10:-10:10,10:-10:10],7).min()  # min() approach -
    # not good
    return cdf(median_filter(img[10:-10:10, 10:-10:10], 3), 0.01)
#     return cdf(img[10:-10:10,10:-10:10],0.02)
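
cdf() above is an imgProcessor helper; assuming cdf(arr, 0.01) returns the intensity at 1% of the cumulative distribution, a rough NumPy-only equivalent of this background estimate would be:

import numpy as np
from scipy.ndimage.filters import median_filter

def background_level_approx(img):
    # hypothetical stand-in for cdf(): 1st percentile of the median-filtered,
    # subsampled image (10 px border ignored, every 10th pixel used)
    sub = median_filter(img[10:-10:10, 10:-10:10], 3)
    return np.percentile(sub, 1)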
Project: dataArtist    Author: radjkarl    | Project source | File source
def _import():
    global median_filter
    from scipy.ndimage.filters import median_filter
Project: pygeotools    Author: dshean    | Project source | File source
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False):
    """
    Older skimage.filter.median_filter

    This smooths, removes noise and fills in nodata areas with the median of valid pixels - effectively an inpainting routine
    """
    #Note, ndimage doesn't properly handle ma - convert to nan
    dem = malib.checkma(dem)
    dem = dem.astype(np.float64)
    #Mask islands
    if erode > 0:
        print("Eroding islands smaller than %s pixels" % (erode * 2)) 
        dem = malib.mask_islands(dem, iterations=erode)
    print("Applying median filter with radius %s" % radius) 
    #Note: this functionality was present in scikit-image 0.9.3
    import skimage.filter
    dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask)
    #Starting in version 0.10.0, this was replaced by the new filter below,
    #which only supports uint8 or uint16
    #import skimage.filters
    #import skimage.morphology 
    #dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask)
    #dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask)
    #Now mask all nans
    #skimage assigns the minimum value as nodata
    #CHECK THIS, seems pretty hacky
    #Also, looks like some valid values are masked at this stage, even though they should be above min
    ndv = np.min(dem_filt_med)
    #ndv = dem_filt_med.min() + 0.001
    out = np.ma.masked_less_equal(dem_filt_med, ndv)
    #Should probably replace the ndv with original ndv
    out.set_fill_value(dem.fill_value)
    if origmask:
        print("Applying original mask")
        #Allow filling of interior holes, but use original outer edge
        #maskfill = malib.maskfill(dem, iterations=radius)
        maskfill = malib.maskfill(dem)
        #dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value)
        out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value)
    return out
Project: neural-style    Author: jayanthkoushik    | Project source | File source
def deprocess_img_and_save(img, filename):
    """Undo pre-processing on an image, and save it."""
    img = img[0, :, :, :]
    add_imagenet_mean(img)
    img = img[::-1].transpose((1, 2, 0))
    img = np.clip(img, 0, 255).astype(np.uint8)
    img = median_filter(img, size=(3, 3, 1))  # 3x3 spatial median applied per colour channel
    try:
        imsave(filename, img)
    except OSError as e:
        print(e)
        sys.exit(1)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def SNR(img1, img2=None, bg=None,
        noise_level_function=None,
        constant_noise_level=False,
        imgs_to_be_averaged=False):
    '''
    Returns a signal-to-noise map
    uses the algorithm described in BEDRICH 2016 JPV (not yet published)

    :param constant_noise_level: True, to assume noise to be constant
    :param imgs_to_be_averaged: True, if SNR is for average(img1, img2)
    '''
    # dark current subtraction:
    img1 = np.asfarray(img1)
    if bg is not None:
        img1 = img1 - bg

    # SIGNAL:
    if img2 is not None:
        img2_exists = True
        img2 = np.asfarray(img2)
        if bg is not None:
            img2 = img2 - bg
        # signal as average of both images
        signal = 0.5 * (img1 + img2)
    else:
        img2_exists = False
        signal = img1
    # denoise:
    signal = median_filter(signal, 3)

    # NOISE
    if constant_noise_level:
        # CONSTANT NOISE
        if img2_exists:
            d = img1 - img2
            # 0.5**0.5 because of sum of variances
            noise = 0.5**0.5 * np.mean(np.abs((d))) * F_RMS2AAD
        else:
            d = (img1 - signal) * F_NOISE_WITH_MEDIAN
            noise = np.mean(np.abs(d)) * F_RMS2AAD
    else:
        # NOISE LEVEL FUNCTION
        if noise_level_function is None:
            noise_level_function, _ = oneImageNLF(img1, img2, signal)
        noise = noise_level_function(signal)
        noise[noise < 1] = 1  # otherwise SNR could be higher than image value

    if imgs_to_be_averaged:
        # SNR will be higher if both given images are supposed to be averaged:
        # factor of noise reduction if SNR is for average(img1, img2):
        noise *= 0.5**0.5

    # BACKGROUND estimation and removal if background not given:
    if bg is None:
        bg = getBackgroundLevel(img1)
        signal -= bg
    snr = signal / noise

    # limit to 1, saying at these points signal=noise:
    snr[snr < 1] = 1
    return snr
Project: imgProcessor    Author: radjkarl    | Project source | File source
def flatFieldFromCloseDistance2(images, bgImages=None, calcStd=False,
                                nlf=None, nstd=6):
    '''
    Same as [flatFieldFromCloseDistance]. Differences are:
    ... single-time-effect removal included
    ... returns the standard deviation of the image average [calcStd=True]

    Optional:
    -----------
    calcStd -> set to True to also return the standard deviation
    nlf -> noise level function (callable)
    nstd -> an artefact needs to deviate by more than [nstd] times the
            noise level to be removed
    '''

    if len(images) > 1:

        # start with brightest images
        def fn(img):
            img = imread(img)
            s0, s1 = img.shape[:2]
            # rough approx. of image brightness:
            return -img[::s0 // 10, ::s1 // 10].min()

        images = sorted(images, key=lambda i: fn(i))

        avgBg = getBackground2(bgImages, images[1])

        i0 = imread(images[0], dtype=float) - avgBg
        i1 = imread(images[1], dtype=float) - avgBg

        if nlf is None:
            nlf = oneImageNLF(i0, i1)[0]

        det = SingleTimeEffectDetection(
            (i0, i1), nlf, nStd=nstd, calcVariance=calcStd)

        for i in images[1:]:
            i = imread(i)
            # exclude erroneously darker areas:
            thresh = det.noSTE - nlf(det.noSTE) * nstd
            mask = i > thresh
            # filter STE:
            det.addImage(i, mask)

        ma = det.noSTE

    else:
        # only a single image given: estimate the background from [bgImages]
        avgBg = getBackground2(bgImages, images[0])
        ma = imread(images[0], dtype=float) - avgBg

    # fast artifact free maximum:
    mx = median_filter(ma[::10, ::10], 3).max()

    if calcStd:
        return ma / mx, det.mma.var**0.5 / mx

    return ma / mx
Project: imgProcessor    Author: radjkarl    | Project source | File source
def postProcessing(arr, method='KW replace + Gauss', mask=None):
    '''
    Post process measured flat field [arr].
    Depending on the measurement, different
        post processing [method]s are beneficial.
        The available methods are presented in
        ---
        K.Bedrich, M.Bokalic et al.:
        ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
        ADVANCED FLAT FIELD CALIBRATION, 2017
        ---

    methods:
        'POLY replace' --> replace [arr] with a 2d polynomial fit
        'KW replace'   -->  ...               a fitted Kang-Weiss function
        'AoV replace'  --> ...                a fitted Angle-of-view function

        'POLY repair' --> same as above but either replacing empty
        'KW repair'       areas or smoothing out high gradient
        'AoV repair'      variations (POLY only)

        'KW repair + Gauss'  --> same as 'KW repair' with an additional
        'KW repair + Median'     Gaussian or Median filter

    mask:
        None/2darray(bool) --> array of the same shape as [arr] indicating
                               invalid or empty positions
    '''
    assert method in ppMETHODS, \
        'post processing method (%s) must be one of %s' % (method, ppMETHODS)

    if method == 'POLY replace':
        return polyfit2dGrid(arr, mask, order=2, replace_all=True)

    elif method == 'KW replace':
        return function(arr, mask, replace_all=True)

    elif method == 'POLY repair':
        return polynomial(arr, mask, replace_all=False)

    elif method == 'KW repair':
        return function(arr, mask, replace_all=False)

    elif method == 'KW repair + Median':
        return median_filter(function(arr, mask, replace_all=False),
                             min(arr.shape) // 20)
    elif method == 'KW repair + Gauss':
        return gaussian_filter(function(arr, mask, replace_all=False),
                               min(arr.shape) // 20)

    elif method == 'AoV repair':
        return function(arr, mask, fn=lambda XY, a:
                        angleOfView(XY, arr.shape, a=a), guess=(0.01),
                        down_scale_factor=1)

    elif method == 'AoV replace':
        return function(arr, mask, fn=lambda XY, a:
                        angleOfView(XY, arr.shape, a=a), guess=(0.01),
                        replace_all=True, down_scale_factor=1)
Project: SCaIP    Author: simonsfoundation    | Project source | File source
def threshold_components_parallel(pars):

        A_i, i, dims, medw, d, thr_method, se, ss, maxthr, nrgthr, extract_cc = pars
        A_temp = np.reshape(A_i, dims[::-1])
        A_temp = median_filter(A_temp, medw)
        if thr_method == 'max':
            BW = (A_temp > maxthr * np.max(A_temp))
        elif thr_method == 'nrg':
            Asor = np.sort(np.squeeze(np.reshape(A_temp, (d, 1))))[::-1]
            temp = np.cumsum(Asor**2)
            ff = np.squeeze(np.where(temp < (1 - nrgthr) * temp[-1]))
            if ff.size > 0:
                if ff.ndim == 0:
                    ind = ff
                else:
                    ind = ff[-1]
                A_temp[A_temp < Asor[ind]] = 0
                BW = (A_temp >= Asor[ind])
            else:
                BW = (A_temp >= 0)

        Ath = np.squeeze(np.reshape(A_temp, (d, 1)))
        Ath2 = np.zeros((d))
        BW = binary_closing(BW.astype(np.int), structure=se)
        if extract_cc:        
            labeled_array, num_features = label(BW, structure=ss)
            BW = np.reshape(BW, (d, 1))
            labeled_array = np.squeeze(np.reshape(labeled_array, (d, 1)))
            nrg = np.zeros((num_features, 1))
            for j in range(num_features):
                nrg[j] = np.sum(Ath[labeled_array == j + 1]**2)

            indm = np.argmax(nrg)         
            #Ath2[labeled_array == indm + 1] = A_i[labeled_array == indm + 1]
            Ath2[labeled_array == indm + 1] = Ath[labeled_array == indm + 1]
        else:            
            BW = BW.flatten()
            Ath2[BW] = Ath[BW]

        return Ath2, i

#%% lars_regression_noise