Python cv2 module: ADAPTIVE_THRESH_MEAN_C example source code

The following 18 code examples, extracted from open-source Python projects, show how to use cv2.ADAPTIVE_THRESH_MEAN_C.
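For orientation, here is a minimal standalone sketch of the call before the project examples (the file names are placeholders): with ADAPTIVE_THRESH_MEAN_C, the threshold for each pixel is the mean of its blockSize x blockSize neighbourhood minus the constant C.

import cv2

# Hypothetical input file; any image that can be read as greyscale works.
img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)

# Threshold for each pixel = mean of its 11x11 neighbourhood minus C (here 2);
# pixels above that local threshold become 255, the rest 0 (THRESH_BINARY).
binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 11, 2)

cv2.imwrite('binary.png', binary)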

Project: reconstruction    Author: microelly2    | project source | file source
def execute_Threshold(proxy,obj):

    try: img=obj.sourceObject.Proxy.img.copy()
    except: img=cv2.imread(__dir__+'/icons/freek.png')

    # img = cv2.imread('dave.jpg',0)
    img = cv2.medianBlur(img,5)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)


    if obj.globalThresholding:
        ret,th1 = cv2.threshold(img,obj.param1,obj.param2,cv2.THRESH_BINARY)
        obj.Proxy.img = cv2.cvtColor(th1, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveMeanTresholding:
        th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
                cv2.THRESH_BINARY,11,2)
        obj.Proxy.img = cv2.cvtColor(th2, cv2.COLOR_GRAY2RGB)

    if obj.adaptiveGaussianThresholding:
        th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
        obj.Proxy.img = cv2.cvtColor(th3, cv2.COLOR_GRAY2RGB)
Project: WebAct    Author: CreatCodeBuild    | project source | file source
def threshold(im_gray, method):
    '''
    Apply the chosen thresholding method to a greyscale image and return
    the binarised result; each branch wraps the corresponding OpenCV call.
    '''
    if method == 'fixed':
        # cv2.threshold returns (retval, image); keep only the thresholded image
        threshed_im = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY)[1]

    elif method == 'mean':
        threshed_im = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -22)

    elif method == 'gaussian':
        threshed_im = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 7)

    else:
        return None

    return threshed_im
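A possible way to call the helper above, assuming a greyscale input image (the file name is hypothetical):

import cv2

im_gray = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
binary = threshold(im_gray, 'mean')  # 15x15 neighbourhood, C = -22 (i.e. local mean plus 22)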
Project: WebAct    Author: CreatCodeBuild    | project source | file source
def threshold(im_gray, method):
    '''
    Apply the chosen thresholding method to a greyscale image and return
    the binarised result; each branch wraps the corresponding OpenCV call.
    '''
    if method == 'fixed':
        # cv2.threshold returns (retval, image); keep only the thresholded image
        threshed_im = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY)[1]

    elif method == 'mean':
        threshed_im = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -22)

    elif method == 'gaussian':
        threshed_im = cv2.adaptiveThreshold(im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 7)

    else:
        return None

    return threshed_im
Project: opencv-helpers    Author: abarrak    | project source | file source
def adaptive_threshold(image, above_thresh_assigned=255, kind='mean', cell_size=35, c_param=17,
                       thresh_style=cv.THRESH_BINARY_INV):
  '''
  :param kind: specify adaptive method, whether 'mean' or 'gaussian'.
  :param cell_size: n for the region size (n x n).
  :param c_param: subtraction constant.
  :return: a binary version of the input image.
  '''
  if kind == 'mean':
    method = cv.ADAPTIVE_THRESH_MEAN_C
  elif kind == 'gaussian':
    method = cv.ADAPTIVE_THRESH_GAUSSIAN_C
  else:
    raise ValueError('Unknown adaptive threshold method.')

  return cv.adaptiveThreshold(image, above_thresh_assigned, method, thresh_style, cell_size, c_param)
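A hedged usage sketch for adaptive_threshold, assuming OpenCV is imported as `import cv2 as cv` (as the `cv` alias above suggests) and that the input is already single-channel; the file name is hypothetical:

import cv2 as cv

gray = cv.imread('document.png', cv.IMREAD_GRAYSCALE)  # hypothetical input image
# Gaussian-weighted local mean over a 35x35 window, 17 subtracted, inverted binary output.
binary = adaptive_threshold(gray, kind='gaussian', cell_size=35, c_param=17)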
Project: QRCodeReader    Author: Griffintaur    | project source | file source
def __convertImagetoBlackWhite(self):
        self.Image = cv.imread(self.ImagePath, cv.IMREAD_COLOR)
        self.imageOriginal = self.Image
        if self.Image is None:
            print('some problem with the image')
        else:
            print('Image Loaded')

        self.Image = cv.cvtColor(self.Image, cv.COLOR_BGR2GRAY)
        self.Image = cv.adaptiveThreshold(
            self.Image,
            255,                        # Value to assign above the local threshold
            cv.ADAPTIVE_THRESH_MEAN_C,  # Mean of the neighbourhood as threshold
            cv.THRESH_BINARY,
            11,                         # Block size of the small local area
            2,                          # Constant to subtract
        )

        return self.Image
Project: HandwritingRecognition    Author: eng-tsmith    | project source | file source
def thresholding(img_grey):
    """
    This functions creates binary images using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)
    #
    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)
    #
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)

    return thresh4
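A possible driver for the helper above, again assuming `import cv2 as cv` and a hypothetical file name:

import cv2 as cv

img_grey = cv.imread('handwriting.png', cv.IMREAD_GRAYSCALE)  # hypothetical scan
binary = thresholding(img_grey)  # Otsu threshold after Gaussian blur, then inverted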
Project: opencv-plgs    Author: Image-Py    | project source | file source
def run(self, ips, snap, img, para = None):
        med = cv2.ADAPTIVE_THRESH_MEAN_C if para['med']=='mean' else cv2.ADAPTIVE_THRESH_GAUSSIAN_C
        mtype = cv2.THRESH_BINARY_INV if para['inv'] else cv2.THRESH_BINARY
        # pass the computed threshold type (mtype), not the raw 'inv' flag
        cv2.adaptiveThreshold(snap, para['max'], med, mtype, para['size'], para['offset'], dst=img)
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def logoDetect(img,imgo):
    '''Locate the vehicle logo region and crop it from the original image.'''
    imglogo=imgo.copy()
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
    #img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
    img=cv2.Canny(img,100,200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2,iterations = 1)
    img = cv2.erode(img, element1, iterations = 3)
    img = cv2.dilate(img, element2,iterations = 3)

    # find contours of the candidate regions
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema=0
    result=[]
    for con in contours:
        x,y,w,h=cv2.boundingRect(con)
        area=w*h
        ratio=max(w/h,h/w)
        if area>300 and area<20000 and ratio<2:
            if area>tema:
                tema=area
                result=[x,y,w,h]
                ratio2=ratio
    # map the detected box back to the original image, using the plate position as a reference
    logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
    logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
    cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
    cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
    print(tema, ratio2, result)
    logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg',logo2)

    return img
Project: Brewereader    Author: ceafdc    | project source | file source
def find_lines(img, acc_threshold=0.25, should_erode=True):
    if len(img.shape) == 3 and img.shape[2] == 3:  # if it's color
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = cv2.adaptiveThreshold(
            img,
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            5,
            2)

    img = cv2.bitwise_not(img)

    # thresh = 127
    # edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
    # edges = cv2.Canny(blur, 500, 500, apertureSize=3)

    if should_erode:
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
        img = cv2.erode(img, element)

    theta = np.pi/2000
    angle_threshold = 2
    horizontal = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[1]),
            min_theta=np.radians(90 - angle_threshold),
            max_theta=np.radians(90 + angle_threshold))
    vertical = cv2.HoughLines(
            img,
            1,
            theta,
            int(acc_threshold * img.shape[0]),
            min_theta=np.radians(-angle_threshold),
            max_theta=np.radians(angle_threshold),
            )

    horizontal = list(horizontal) if horizontal is not None else []
    vertical = list(vertical) if vertical is not None else []

    horizontal = [line[0] for line in horizontal]
    vertical = [line[0] for line in vertical]

    horizontal = np.asarray(horizontal)
    vertical = np.asarray(vertical)

    return horizontal, vertical
Project: image_text_reader    Author: yardstick17    | project source | file source
def remove_noise_and_smooth(file_name):
    logging.info('Removing noise and smoothing image')
    img = cv2.imread(file_name, 0)
    filtered = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 41, 3)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    img = image_smoothening(img)
    or_image = cv2.bitwise_or(img, closing)
    return or_image
Project: Notes2ppt    Author: gsengupta2810    | project source | file source
def adaptive_thresholding(img):
  # adaptive mean binary threshold
  th4 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
  # adaptive gaussian thresholding
  th5 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
  return th5
Project: PicFilter    Author: dhuadaar    | project source | file source
def render(self,frame):
        numDownSamples = 2
        img_rgb = frame
        # number of downscaling steps
        numBilateralFilters = 7
        # number of bilateral filtering steps
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in range(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in range(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)

        # upsample image to original size
        for _ in range(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 7)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        # -- STEP 5 --
        # convert back to color so that it can be bit-ANDed with color image
        img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
        final = cv2.bitwise_and(img_color, img_edge)
        return cv2.medianBlur(final,7)
Project: PicFilter    Author: dhuadaar    | project source | file source
def render(self,frame):
        canvas = cv2.imread("pen.jpg", cv2.IMREAD_GRAYSCALE)  # load the pen texture as greyscale
        numDownSamples = 2
        img_rgb = frame
        # number of downscaling steps
        numBilateralFilters = 3
        # number of bilateral filtering steps
        # -- STEP 1 --
        # downsample image using Gaussian pyramid
        img_color = img_rgb
        for _ in range(numDownSamples):
            img_color = cv2.pyrDown(img_color)
        # repeatedly apply small bilateral filter instead of applying
        # one large filter
        for _ in range(numBilateralFilters):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 3)

        # upsample image to original size
        for _ in range(numDownSamples):
            img_color = cv2.pyrUp(img_color)
        # convert to grayscale and apply median blur
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
        img_blur = cv2.medianBlur(img_gray, 3)

        # detect and enhance edges
        img_edge = cv2.adaptiveThreshold(img_blur, 255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 9, 2)
        return  cv2.multiply(cv2.medianBlur(img_edge,7), canvas, scale=1./256)
Project: esys-pbi    Author: fsxfreak    | project source | file source
def find_concetric_circles(gray_img,min_ring_count=3, visual_debug=False):

    # threshold a slightly blurred copy of the image to get crisp, clean edges; the blur removes small features
    edges = cv2.adaptiveThreshold(cv2.blur(gray_img,(3,3)), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 11)
    _, contours, hierarchy = cv2.findContours(edges,
                                    mode=cv2.RETR_TREE,
                                    method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
    if visual_debug is not False:
        cv2.drawContours(visual_debug,contours,-1,(200,0,0))
    if contours is None or hierarchy is None:
        return []
    clusters = get_nested_clusters(contours,hierarchy[0],min_nested_count=min_ring_count)
    concentric_cirlce_clusters = []

    #speed up code by caching computed ellipses
    ellipses = {}

    # for each cluster, fit ellipses and cull members that don't have a good ellipse fit
    for cluster in clusters:
        if visual_debug is not False:
            cv2.drawContours(visual_debug, [contours[i] for i in cluster],-1, (0,0,255))
        candidate_ellipses = []
        for i in cluster:
            c = contours[i]
            if len(c)>5:
                if not i in ellipses:
                    e = cv2.fitEllipse(c)
                    fit = max(dist_pts_ellipse(e,c))
                    ellipses[i] = e,fit
                else:
                    e,fit = ellipses[i]
                a,b = e[1][0]/2.,e[1][1]/2.
                if fit<max(2,max(e[1])/20):
                    candidate_ellipses.append(e)
                    if visual_debug is not False:
                        cv2.ellipse(visual_debug, e, (0,255,0),1)

        if candidate_ellipses:
            cluster_center = np.mean(np.array([e[0] for e in candidate_ellipses]),axis=0)
            candidate_ellipses = [e for e in candidate_ellipses if np.linalg.norm(e[0]-cluster_center)<max(3,min(e[1])/20) ]
            if len(candidate_ellipses) >= min_ring_count:
                concentric_cirlce_clusters.append(candidate_ellipses)
                if visual_debug is not False:
                    cv2.ellipse(visual_debug, candidate_ellipses[-1], (0,255,255),4)

    # return clusters sorted by the size of the outermost circle, biggest first
    return sorted(concentric_cirlce_clusters,key=lambda e:-max(e[-1][1]))
Project: Vision-based-parking-lot-availability-OpenCV    Author: Saar1312    | project source | file source
def thresholdImage(gray,thr_type,thr,block_size=None,img=None):

    """ Where thr_type in {1,2,3,4}
        1: Normal threshold
        2: Otsu
        3: Adaptive (mean)
        4: Adaptive (Gaussian)
        More thresholds: Using two thresholds taking into account that most pixels are from the floor 
            (Trying to don't erase black cars)
        5: Double threshold (using percentiles) 
        6: Double threshold (using manually set values)
    """
    if thr_type == 1: 
        ret,thr = cv2.threshold(gray,thr,255,cv2.THRESH_BINARY)
        return thr
    elif thr_type == 2:
        ret,thr = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) # Black/red cars disappeared. Good for segmentation of background
        return thr
    elif thr_type == 3:
        return cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,block_size,2) # Less noise, but can't recognize all cars
    elif thr_type == 4:
        return cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,block_size,2) # More noise, more cars
    elif thr_type == 5:
        firstQ = np.percentile(gray,65) # The grey value at the 65th percentile of all pixels
        secondQ = np.percentile(gray,50)
        thirdQ = np.percentile(gray,35)
        return applyThreshold(gray,firstQ,thirdQ)
    elif thr_type == 6:
        return applyThreshold(gray,40,113)
    elif thr_type == 7:
        rows,col = img[:,:,0].shape
        r1,g1,b1 = getChannels(gray) # Actually not greyscale but a BGR image (the variable name is just reused)
        r2,g2,b2 = getChannels(img)
        res = np.zeros((rows,col))
        for i in range(0,rows):
            for j in range(0,col):
                rDif = abs(int(r1[i,j]) - int(r2[i,j]))
                gDif = abs(int(g1[i,j]) - int(g2[i,j]))
                bDif = abs(int(b1[i,j]) - int(b2[i,j]))
                if rDif >= thr or gDif >= thr or bDif >= thr:
                    res[i,j] = 0
                else:
                    res[i,j] = 255
        return res

    else:
        return None
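A hedged example selecting the adaptive-mean branch (thr_type 3); block_size must be odd, and the thr argument is ignored in that branch. The file name is hypothetical:

import cv2

gray = cv2.imread('parking_lot.png', cv2.IMREAD_GRAYSCALE)  # hypothetical camera frame
binary = thresholdImage(gray, thr_type=3, thr=0, block_size=25)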
Project: page_dewarp    Author: mzucker    | project source | file source
def get_mask(name, small, pagemask, masktype):

    sgray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY)

    if masktype == 'text':

        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     ADAPTIVE_WINSZ,
                                     25)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.1, 'thresholded', mask)

        mask = cv2.dilate(mask, box(9, 1))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.2, 'dilated', mask)

        mask = cv2.erode(mask, box(1, 3))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.3, 'eroded', mask)

    else:

        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     ADAPTIVE_WINSZ,
                                     7)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.4, 'thresholded', mask)

        mask = cv2.erode(mask, box(3, 1), iterations=3)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.5, 'eroded', mask)

        mask = cv2.dilate(mask, box(8, 2))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.6, 'dilated', mask)

    return np.minimum(mask, pagemask)
Project: page_dewarp    Author: mzucker    | project source | file source
def remap_image(name, img, small, page_dims, params):

    height = 0.5 * page_dims[1] * OUTPUT_ZOOM * img.shape[0]
    height = round_nearest_multiple(height, REMAP_DECIMATE)

    width = round_nearest_multiple(height * page_dims[0] / page_dims[1],
                                   REMAP_DECIMATE)

    print('  output will be {}x{}'.format(width, height))

    height_small = height // REMAP_DECIMATE
    width_small = width // REMAP_DECIMATE

    page_x_range = np.linspace(0, page_dims[0], width_small)
    page_y_range = np.linspace(0, page_dims[1], height_small)

    page_x_coords, page_y_coords = np.meshgrid(page_x_range, page_y_range)

    page_xy_coords = np.hstack((page_x_coords.flatten().reshape((-1, 1)),
                                page_y_coords.flatten().reshape((-1, 1))))

    page_xy_coords = page_xy_coords.astype(np.float32)

    image_points = project_xy(page_xy_coords, params)
    image_points = norm2pix(img.shape, image_points, False)

    image_x_coords = image_points[:, 0, 0].reshape(page_x_coords.shape)
    image_y_coords = image_points[:, 0, 1].reshape(page_y_coords.shape)

    image_x_coords = cv2.resize(image_x_coords, (width, height),
                                interpolation=cv2.INTER_CUBIC)

    image_y_coords = cv2.resize(image_y_coords, (width, height),
                                interpolation=cv2.INTER_CUBIC)

    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    remapped = cv2.remap(img_gray, image_x_coords, image_y_coords,
                         cv2.INTER_CUBIC,
                         None, cv2.BORDER_REPLICATE)

    thresh = cv2.adaptiveThreshold(remapped, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, ADAPTIVE_WINSZ, 25)

    pil_image = Image.fromarray(thresh)
    pil_image = pil_image.convert('1')

    threshfile = name + '_thresh.png'
    pil_image.save(threshfile, dpi=(OUTPUT_DPI, OUTPUT_DPI))

    if DEBUG_LEVEL >= 1:
        height = small.shape[0]
        width = int(round(height * float(thresh.shape[1])/thresh.shape[0]))
        display = cv2.resize(thresh, (width, height),
                             interpolation=cv2.INTER_AREA)
        debug_show(name, 6, 'output', display)

    return threshfile
Project: event-Python    Author: gorchard    | project source | file source
def show_em(self):
        """Displays the EM events (grayscale ATIS events)"""
        frame_length = 24e3
        t_max = self.data.ts[-1]
        frame_start = self.data[0].ts
        frame_end = self.data[0].ts + frame_length
        max_val = 1.16e5
        min_val = 1.74e3
        val_range = max_val - min_val

        thr = np.rec.array(None, dtype=[('valid', np.bool_), ('low', np.uint64), ('high', np.uint64)], shape=(self.height, self.width))
        thr.valid.fill(False)
        thr.low.fill(frame_start)
        thr.high.fill(0)

        def show_em_frame(frame_data):
            """Prepare and show a single frame of em data to be shown"""
            for datum in np.nditer(frame_data):
                ts_val = datum['ts'].item(0)
                thr_data = thr[datum['y'].item(0), datum['x'].item(0)]

                if datum['p'].item(0) == 0:
                    thr_data.valid = 1
                    thr_data.low = ts_val
                elif thr_data.valid == 1:
                    thr_data.valid = 0
                    thr_data.high = ts_val - thr_data.low

            img = 255 * (1 - (thr.high - min_val) / (val_range))
            #thr_h = cv2.adaptiveThreshold(thr_h, 255,
            #cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0)
            img = np.piecewise(img, [img <= 0, (img > 0) & (img < 255), img >= 255], [0, lambda x: x, 255])
            img = img.astype('uint8')
            cv2.imshow('img', img)
            cv2.waitKey(1)

        while frame_start < t_max:
            #with timer.Timer() as em_playback_timer:
            frame_data = self.data[(self.data.ts >= frame_start) & (self.data.ts < frame_end)]
            show_em_frame(frame_data)
            frame_start = frame_end + 1
            frame_end += frame_length + 1
            #print 'showing em frame took %s seconds' %em_playback_timer.secs

        cv2.destroyAllWindows()
        return