Python cv2 module: convertScaleAbs() example source code

The following 27 code examples, extracted from open-source Python projects, illustrate how to use cv2.convertScaleAbs().

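For orientation, a minimal sketch of what cv2.convertScaleAbs() computes: dst = saturate(|src * alpha + beta|), i.e. it scales, shifts, takes the absolute value and saturates the result to the 0-255 range, always returning a uint8 array. The toy array and parameters below are illustrative only, not taken from any of the projects that follow.

import numpy as np
import cv2

# a small signed 16-bit array with negative values and values beyond 255
src = np.array([[-300, -10, 0],
                [ 100, 200, 1000]], dtype=np.int16)

# |src * 0.5 + 10|, saturated to 0..255 and cast to uint8
dst = cv2.convertScaleAbs(src, alpha=0.5, beta=10)
print(dst.dtype)   # uint8
print(dst)         # expected: [[140   5  10]
                   #            [ 60 110 255]]
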
Project: saliency    Author: shuuchen
def N(image):
    """
        Normalization parameter as per Itti et al. (1998).
        returns a normalized feature map image.
    """
    M = 8.  # an arbitrary global maximum to which the image is scaled.
            # (When saving saliency maps as images, pixel values may become
            # too large or too small for the chosen image format depending
            # on this constant)
    image = cv2.convertScaleAbs(image, alpha=M/image.max(), beta=0.)
    w,h = image.shape
    maxima = maximum_filter(image, size=(w/10,h/1))
    maxima = (image == maxima)
    mnum = maxima.sum()
    logger.debug("Found %d local maxima.", mnum)
    maxima = numpy.multiply(maxima, image)
    mbar = float(maxima.sum()) / mnum
    logger.debug("Average of local maxima: %f.  Global maximum: %f", mbar, M)
    return image * (M-mbar)**2
Project: BlurDetection    Author: whdcumt
def blur_mask(img):
    assert isinstance(img, numpy.ndarray), 'img_col must be a numpy array'
    assert img.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img.ndim)
    msk, val, blurry = main.blur_detector(img)
    logger.debug('inverting img_fft')
    msk = cv2.convertScaleAbs(255-(255*msk/numpy.max(msk)))
    msk[msk < 50] = 0
    msk[msk > 127] = 255
    logger.debug('removing border')
    msk = remove_border(msk)
    logger.debug('applying erosion and dilation operators')
    msk = morphology(msk)
    logger.debug('evaluation complete')
    result = numpy.sum(msk)/(255.0*msk.size)
    logger.info('{0}% of input image is blurry'.format(int(100*result)))
    return msk, result, blurry
Project: dust_repos    Author: taozhijiang
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise, then convert to grayscale if the input is a color image
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Compute the Sobel gradient in the x direction (16-bit to avoid overflow)
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel??", sobel_8u)

    # Otsu binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)    
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("OTSU????", bgimg)

    return bgimg
Project: dust_repos    Author: taozhijiang
def img_sobel_binary(im, blur_sz):

    # Gaussian blur to suppress noise, then convert to grayscale if the input is a color image
    img_blur = cv2.GaussianBlur(im,blur_sz,0)
    if len(img_blur.shape) == 3:
        blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY) 
    else:
        blur_gray = img_blur

    # Compute the Sobel gradient in the x direction (16-bit to avoid overflow)
    sobelx = cv2.Sobel(blur_gray,cv2.CV_16S,1,0,ksize=3)
    abs_sobelx = np.absolute(sobelx)
    sobel_8u = np.uint8(abs_sobelx)
    img_show_hook("Sobel??", sobel_8u)

    # Otsu binarization
    ret, thd = cv2.threshold(sobel_8u, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)    
    thd_abs = cv2.convertScaleAbs(thd)
    bgimg = cv2.addWeighted(thd_abs, 1, 0, 0, 0)

    img_show_hook("OTSU????", bgimg)

    return bgimg
Project: virtual-dressing-room    Author: akash0x53
def normalized(self):

#        t1=time.time()
        b=self.down[:,:,0]
        g=self.down[:,:,1]
        r=self.down[:,:,2]

        sum=b+g+r


        self.norm[:,:,0]=b/sum*255.0
        self.norm[:,:,1]=g/sum*255.0
        self.norm[:,:,2]=r/sum*255.0

 #       print "conversion time",time.time()-t1

        #self.norm=cv2.merge([self.norm1,self.norm2,self.norm3])
        self.norm_rgb=cv2.convertScaleAbs(self.norm)
        #self.norm.dtype=np.uint8
        return self.norm_rgb
Project: virtual-dressing-room    Author: akash0x53
def detect_shirt(self):


        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)

        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)

        cntr,h=cv2.findContours(self.m,cv2.cv.CV_RETR_EXTERNAL,cv2.cv.CV_CHAIN_APPROX_SIMPLE)

        return self.m,cntr
Project: answer-sheet-scan    Author: inuyasha2012
def get_init_process_img(roi_img):
    """
    Pre-process the ROI image: Sobel gradients, thresholding, erosion/dilation and Canny edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img
Project: SLIC_cityscapes    Author: wpqmanu
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    graysrc = cv2.cvtColor(colorsrc, cv2.cv.CV_BGR2GRAY)
    graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

    ## gradient X ##
    gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
    gradx = cv2.convertScaleAbs(gradx)

    ## gradient Y ##
    grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
    grady = cv2.convertScaleAbs(grady)

    grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

    return grad
Project: py2DIC    Author: Geod-Geom
def template_match(img_master, img_slave, method = 'cv2.TM_CCOEFF_NORMED', mlx = 1, mly = 1, show=True):    

    # Apply image oversampling 
    img_master = cv2.resize(img_master,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)
    img_slave  = cv2.resize(img_slave,None,fx=mlx, fy=mly, interpolation = cv2.INTER_CUBIC)

    res = cv2.matchTemplate(img_slave,img_master,eval(method))

    w, h = img_master.shape[::-1]    
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # Control if the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum value
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc 
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Retrieve center coordinates
    px = (top_left[0]+bottom_right[0])/(2.0*mlx)
    py = (top_left[1]+bottom_right[1])/(2.0*mly)

    # Scale images for visualization
    img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0/500))
    img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0/500))

    cv2.rectangle(img_slave_scaled,top_left, bottom_right, 255, 2*mlx) 

    if show == True:
        plt.figure(figsize=(20,10))
        plt.subplot(131),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(132),plt.imshow(img_master_scaled,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(133),plt.imshow(img_slave_scaled, cmap = 'gray')
        plt.suptitle(method)
        plt.show()

    return px, py, max_val
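A hedged call sketch for the function above (the file names are placeholders; its module is assumed to import cv2, numpy as np and matplotlib.pyplot as plt, and the master image must be smaller than the slave image):

import cv2

master = cv2.imread('master.png', cv2.IMREAD_GRAYSCALE)   # reference patch
slave  = cv2.imread('slave.png',  cv2.IMREAD_GRAYSCALE)   # image to search in
px, py, score = template_match(master, slave, method='cv2.TM_CCOEFF_NORMED',
                               mlx=2, mly=2, show=False)
print('matched centre: (%.1f, %.1f), score %.3f' % (px, py, score))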
Project: vehicle_detection    Author: AuzanMuh
def backgroundSubtractionAverage(frame_ori, avg, alpha):
    accuWeight = cv2.accumulateWeighted(frame_ori, avg, alpha)
    cvtScaleAbs = cv2.convertScaleAbs(accuWeight)
    return cvtScaleAbs
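A hedged usage sketch for this helper (the capture loop, learning rate and window names are illustrative, not from the project): cv2.accumulateWeighted() needs a floating-point accumulator, and convertScaleAbs() brings the running average back to uint8 so it can be displayed or differenced against the current frame.

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
avg = np.float32(frame)                         # float accumulator required by accumulateWeighted

while ret:
    background = backgroundSubtractionAverage(frame, avg, 0.05)
    diff = cv2.absdiff(frame, background)       # moving objects stand out here
    cv2.imshow('background', background)
    cv2.imshow('diff', diff)
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
    ret, frame = cap.read()

cap.release()
cv2.destroyAllWindows()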
Project: recognizeFitExercise    Author: tyiannak
def getRGBS(img, PLOT = False):

    image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)

    # grab the image channels, initialize the tuple of colors,
    # the figure and the flattened feature vector   
    features = []
    featuresSobel = []
    Grayscale = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)
    histG = cv2.calcHist([Grayscale], [0], None, [16], [0, 256])
    histG = histG / histG.sum()
    features.extend(histG[:,0].tolist())


    grad_x = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 1, 0, ksize = 3, scale = 1, delta = 0, borderType = cv2.BORDER_DEFAULT))
    grad_y = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 0, 1, ksize = 3, scale = 1, delta = 0, borderType = cv2.BORDER_DEFAULT))
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    dst = cv2.addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0)
    histSobel = cv2.calcHist([dst], [0], None, [16], [0, 256])
    histSobel = histSobel / histSobel.sum()
    features.extend(histSobel[:,0].tolist())

    Fnames = []
    Fnames.extend(["Color-Gray"+str(i) for i in range(8)])
    Fnames.extend(["Color-GraySobel"+str(i) for i in range(8)])

    return features, Fnames
Project: GidroGraf-Sirius    Author: alf3r
def convert_range(self):
        for i in range(1,30):
            alpha = 1*i
            a = cv2.convertScaleAbs(self.data, alpha=alpha, beta=0)
            beta = 127 - np.median(a, [0, 1])
            a = cv2.convertScaleAbs(self.data, alpha=alpha, beta=beta)

            condition = np.mod(a, 255) == 0
            K = np.sum(condition) / a.size
            if K > 0.1:
                break
        self.data = a
Project: GidroGraf-Sirius    Author: alf3r
def convert_range(data):
    # dst=cv2.convertScaleAbs(src=data, alpha=5000, beta=0)
    clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(20, 20))
    dst = clahe.apply(data)
    return dst
Project: eclipse2017    Author: google
def paintGL(self, sun_x, sun_y, sun_z, moon_x, moon_y, moon_z):
        # Draw the sun
        self.fbo.bind()
        self.draw_sun(sun_x, sun_y, sun_z)
        glFlush()
        self.fbo.release()
        image = self.fbo.toImage()

        # Produce blurred image of sun
        npimage = qimage_to_numpy(image)
        h, w, b = npimage.shape
        blur = cv2.GaussianBlur(npimage, (75, 75), 0, 0)
        cv2.convertScaleAbs(blur, blur, 2, 1)
        # Combine the blurred with the sun
        combo = cv2.addWeighted(blur, 0.5, npimage, 0.5, -1)
        h, w, b = combo.shape
        qimage = QtGui.QImage(combo.data,w,h,QtGui.QImage.Format_ARGB32).rgbSwapped()
        self.fbo.bind()
        device = QtGui.QOpenGLPaintDevice(RES_X, RES_Y)
        painter = QtGui.QPainter()
        painter.begin(device)
        rect = QtCore.QRect(0, 0, RES_X, RES_Y)
        # Draw the blurred sun/sun combo image on the screen
        painter.drawImage(rect, qimage, rect)
        painter.end()
        self.fbo.release()

        # Draw the moon
        self.fbo.bind()
        self.draw_moon(moon_x, moon_y, moon_z)
        glFlush()
        self.fbo.release()
Project: thesis_scripts    Author: PhilippKopp
def _get_gradient_magnitude(im):
    "Get magnitude of gradient for given image"
    ddepth = cv2.CV_32F
    dx = cv2.Sobel(im, ddepth, 1, 0)
    dy = cv2.Sobel(im, ddepth, 0, 1)
    dxabs = cv2.convertScaleAbs(dx)
    dyabs = cv2.convertScaleAbs(dy)
    mag = cv2.addWeighted(dxabs, 0.5, dyabs, 0.5, 0)

    return np.average(mag)
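One way such an average gradient magnitude gets used is as a rough focus/sharpness score: blurred images tend to score lower. A small hedged sketch (the file names and the 15.0 cut-off are arbitrary placeholders, not values from the project):

import cv2

for path in ('sharp.jpg', 'blurry.jpg'):           # hypothetical input files
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    score = _get_gradient_magnitude(img)
    label = 'sharp' if score > 15.0 else 'possibly blurred'
    print('%s: mean gradient magnitude %.2f (%s)' % (path, score, label))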
Project: virtual-dressing-room    Author: akash0x53
def detect_shirt2(self):
        self.hsv=cv2.cvtColor(self.norm_rgb,cv.CV_BGR2HSV)
        self.hue,s,_=cv2.split(self.hsv)

        _,self.dst=cv2.threshold(self.hue,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        self.fg=cv2.erode(self.dst,None,iterations=3)
        self.bg=cv2.dilate(self.dst,None,iterations=1)
        _,self.bg=cv2.threshold(self.bg,1,128,1)
        mark=cv2.add(self.fg,self.bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)

        m=cv2.convertScaleAbs(mark32)
        _,m=cv2.threshold(m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

        cntr,h=cv2.findContours(m,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        print len(cntr)
        #print cntr[0].shape
        #cntr[1].dtype=np.float32
        #ret=cv2.contourArea(np.array(cntr[1]))
        #print ret
        #cntr[0].dtype=np.uint8
        cv2.drawContours(m,cntr,-1,(255,255,255),3)
        cv2.imshow("mask_fg",self.fg)
        cv2.imshow("mask_bg",self.bg)
        cv2.imshow("mark",m)
Project: virtual-dressing-room    Author: akash0x53
def subtract_back(self,frm):
        #dst=self.__back__-self.__foreground__
        temp=np.zeros((600,800),np.uint8)

        self.__foreground__=cv2.blur(self.__foreground__,(3,3))
        dst=cv2.absdiff(self.__back__,self.__foreground__)

        #dst=cv2.adaptiveThreshold(dst,255,cv.CV_THRESH_BINARY,cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,5,10)
        val,dst=cv2.threshold(dst,0,255,cv.CV_THRESH_BINARY+cv.CV_THRESH_OTSU)

        fg=cv2.erode(dst,None,iterations=1)
        bg=cv2.dilate(dst,None,iterations=4)

        _,bg=cv2.threshold(bg,1,128,1)

        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        #dst.copy(temp)

        #seq=cv.FindContours(cv.fromarray(dst),self.mem,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #cntr,h=cv2.findContours(dst,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
        #print cntr,h
        #cv.DrawContours(cv.fromarray(temp),seq,(255,255,255),(255,255,255),1,cv.CV_FILLED)
        cv2.watershed(frm, mark32)
        self.final_mask=cv2.convertScaleAbs(mark32)
        #print temp

        #--outputs---
        #cv2.imshow("subtraction",fg)
        #cv2.imshow("thres",dst)
        #cv2.imshow("thres1",bg)
        #cv2.imshow("mark",mark)
        #cv2.imshow("final",self.final_mask)
Project: SuperResolution_Caffe    Author: BobLiu20
def __image_generator(self):
        def id_generator(size=16, max_letter=6):
            _str = ''
            _letter_cnt = 0
            for i in range(size):
                if _letter_cnt < max_letter:
                    _c = random.choice(string.ascii_uppercase + string.digits)
                    if _c in string.ascii_uppercase:
                        _letter_cnt += 1
                else:
                    _c = random.choice(string.digits)
                _str += _c
            return _str
        def blur_method(_im, m):
            if m == 0:
                return _im
            elif m == 1:
                return cv2.GaussianBlur(_im, (5, 5), 0)
            elif m == 2:
                return cv2.blur(_im, (5,5))
            elif m == 3:
                return cv2.medianBlur(_im, 5)
            else:
                return _im
        def brightness(_im):
            _brightness_offset = np.random.randint(-50, 50)
            return cv2.convertScaleAbs(_im, alpha=1, beta=_brightness_offset)

        _dmtx = DMTX(shape=3)# shape=3 is 16x16
        while True:
            # 022RDXBTH4001093
            _str = id_generator(16, 2)
            _dmtx.encode(_str)
            _im = np.array(_dmtx.image)# [:,:,::-1]
            _im = cv2.cvtColor(_im, cv2.COLOR_RGB2GRAY)
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]))
            _h, _w = _im.shape[:2]
            # random mirco rotation
            _angle = np.random.randint(-6, 6) / 2.0
            _rot_mat = cv2.getRotationMatrix2D((_w / 2, _h / 2), _angle, 1)
            _im = cv2.warpAffine(_im, _rot_mat, (_w, _h))
            # get label
            _h2, _w2 = self.la_shape
            _label = _im[(_h-_h2)/2:-(_h-_h2)/2, (_w-_w2)/2:-(_w-_w2)/2]
            # low-resolution
            _scale = np.random.choice(range(1, 6))
            _im = cv2.resize(_im, (0,0), fx=1/float(_scale), fy=1/float(_scale))
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]))
            # add noise
            _im = blur_method(_im, np.random.choice(range(0, 4)))
            _im = brightness(_im)
            # to caffe data format
            _im = _im.astype(np.float32, copy=False)
            _label = _label.astype(np.float32, copy=False)
            _im *= 0.0039215684
            _label *= 0.0039215684

            yield _im, _label
Project: SuperResolution_Caffe    Author: BobLiu20
def __image_generator(self):
        def id_generator(size=16, max_letter=6):
            _str = ''
            _letter_cnt = 0
            for i in range(size):
                if _letter_cnt < max_letter:
                    _c = random.choice(string.ascii_uppercase + string.digits)
                    if _c in string.ascii_uppercase:
                        _letter_cnt += 1
                else:
                    _c = random.choice(string.digits)
                _str += _c
            return _str
        def blur_method(_im, m):
            if m == 0:
                return _im
            elif m == 1:
                return cv2.GaussianBlur(_im, (5, 5), 0)
            elif m == 2:
                return cv2.blur(_im, (5,5))
            elif m == 3:
                return cv2.medianBlur(_im, 5)
            else:
                return _im
        def brightness(_im):
            _brightness_offset = np.random.randint(-50, 50)
            return cv2.convertScaleAbs(_im, alpha=1, beta=_brightness_offset)

        _dmtx = DMTX(shape=3)# shape=3 is 16x16
        while True:
            # 022RDXBTH4001093
            _str = id_generator(16, 2)
            _dmtx.encode(_str)
            _im = np.array(_dmtx.image)# [:,:,::-1]
            _im = cv2.cvtColor(_im, cv2.COLOR_RGB2GRAY)
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]), interpolation=cv2.INTER_CUBIC)
            _h, _w = _im.shape[:2]
            # random mirco rotation
            _angle = np.random.randint(-6, 6) / 2.0
            _rot_mat = cv2.getRotationMatrix2D((_w / 2, _h / 2), _angle, 1)
            _im = cv2.warpAffine(_im, _rot_mat, (_w, _h))
            # get label
            _label = cv2.resize(_im, (self.la_shape[1], self.la_shape[0]), interpolation=cv2.INTER_CUBIC)
            # low-resolution
            _scale = np.random.choice(range(1, 6))
            _im = cv2.resize(_im, (0,0), fx=1/float(_scale), fy=1/float(_scale), interpolation=cv2.INTER_CUBIC)
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]), interpolation=cv2.INTER_CUBIC)
            # add noise
            _im = blur_method(_im, np.random.choice(range(0, 4)))
            _im = brightness(_im)
            # to caffe data format
            _im = _im.astype(np.float32, copy=False)
            _label = _label.astype(np.float32, copy=False)
            _im *= 0.0039215684
            _label *= 0.0039215684

            yield _im, _label
Project: SuperResolution_Caffe    Author: BobLiu20
def __image_generator(self):
        def id_generator(size=16, max_letter=6):
            _str = ''
            _letter_cnt = 0
            for i in range(size):
                if _letter_cnt < max_letter:
                    _c = random.choice(string.ascii_uppercase + string.digits)
                    if _c in string.ascii_uppercase:
                        _letter_cnt += 1
                else:
                    _c = random.choice(string.digits)
                _str += _c
            return _str
        def blur_method(_im, m):
            if m == 0:
                return _im
            elif m == 1:
                return cv2.GaussianBlur(_im, (5, 5), 0)
            elif m == 2:
                return cv2.blur(_im, (5,5))
            elif m == 3:
                return cv2.medianBlur(_im, 5)
            else:
                return _im
        def brightness(_im):
            _brightness_offset = np.random.randint(-50, 50)
            return cv2.convertScaleAbs(_im, alpha=1, beta=_brightness_offset)

        _dmtx = DMTX(shape=3)# shape=3 is 16x16
        while True:
            # 022RDXBTH4001093
            _str = id_generator(16, 2)
            _dmtx.encode(_str)
            _im = np.array(_dmtx.image)# [:,:,::-1]
            _im = cv2.cvtColor(_im, cv2.COLOR_RGB2GRAY)
            _im = cv2.resize(_im, (self.im_shape[1]-12, self.im_shape[0]-12))
            _h, _w = _im.shape[:2]
            # random mirco rotation
            _angle = np.random.randint(-6, 6) / 2.0
            _rot_mat = cv2.getRotationMatrix2D((_w / 2, _h / 2), _angle, 1)
            _im = cv2.warpAffine(_im, _rot_mat, (_w, _h))
            # get label
            _label = cv2.resize(_im, (self.la_shape[1], self.la_shape[0]))
            # low-resolution
            _scale = np.random.choice(range(1, 6))
            _im = cv2.resize(_im, (0,0), fx=1/float(_scale), fy=1/float(_scale))
            _im = cv2.resize(_im, (self.im_shape[1]-12, self.im_shape[0]-12))
            # add border. Need by net. 112 -> 100
            _im = cv2.copyMakeBorder(_im, 6, 6, 6, 6, cv2.BORDER_REPLICATE)
            # add noise
            _im = blur_method(_im, np.random.choice(range(0, 4)))
            _im = brightness(_im)
            # to caffe data format
            _im = _im.astype(np.float32, copy=False)
            _label = _label.astype(np.float32, copy=False)
            _im *= 0.0039215684
            _label *= 0.0039215684

            yield _im, _label
Project: SuperResolution_Caffe    Author: BobLiu20
def __image_generator(self):
        def id_generator(size=16, max_letter=6):
            _str = ''
            _letter_cnt = 0
            for i in range(size):
                if _letter_cnt < max_letter:
                    _c = random.choice(string.ascii_uppercase + string.digits)
                    if _c in string.ascii_uppercase:
                        _letter_cnt += 1
                else:
                    _c = random.choice(string.digits)
                _str += _c
            return _str
        def blur_method(_im, m):
            if m == 0:
                return _im
            elif m == 1:
                return cv2.GaussianBlur(_im, (5, 5), 0)
            elif m == 2:
                return cv2.blur(_im, (5,5))
            elif m == 3:
                return cv2.medianBlur(_im, 5)
            else:
                return _im
        def brightness(_im):
            _brightness_offset = np.random.randint(-50, 50)
            return cv2.convertScaleAbs(_im, alpha=1, beta=_brightness_offset)

        _dmtx = DMTX(shape=3)# shape=3 is 16x16
        while True:
            # 022RDXBTH4001093
            _str = id_generator(16, 2)
            _dmtx.encode(_str)
            _im = np.array(_dmtx.image)# [:,:,::-1]
            _im = cv2.cvtColor(_im, cv2.COLOR_RGB2GRAY)
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]), interpolation=cv2.INTER_CUBIC)
            _h, _w = _im.shape[:2]
            # random mirco rotation
            _angle = np.random.randint(-6, 6) / 2.0
            _rot_mat = cv2.getRotationMatrix2D((_w / 2, _h / 2), _angle, 1)
            _im = cv2.warpAffine(_im, _rot_mat, (_w, _h))
            # get label
            _label = cv2.resize(_im, (self.la_shape[1], self.la_shape[0]), interpolation=cv2.INTER_CUBIC)
            # low-resolution
            _scale = np.random.choice(range(1, 6))
            _im = cv2.resize(_im, (0,0), fx=1/float(_scale), fy=1/float(_scale), interpolation=cv2.INTER_CUBIC)
            _im = cv2.resize(_im, (self.im_shape[1], self.im_shape[0]), interpolation=cv2.INTER_CUBIC)
            # add noise
            _im = blur_method(_im, np.random.choice(range(0, 4)))
            _im = brightness(_im)
            # to caffe data format
            _im = _im.astype(np.float32, copy=False)
            _label = _label.astype(np.float32, copy=False)
            _im *= 0.0039215684
            _label *= 0.0039215684

            yield _im, _label
Project: cbpt    Author: egrinstein
def main():

    cap = cv2.VideoCapture(0)
    ret,frame = cap.read()
    print("MAIN:",frame.shape)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    x,y = 240,320
    pf = ParticleFilter(x,y,frame,n_particles=500,square_size=50,
                            dt=0.20)
    alpha = 0.5
    while(True):        
        ret, frame = cap.read()
        orig = np.array(frame)
        img = frame
        norm_factor = 255.0/np.sum(frame,axis=2)[:,:,np.newaxis]

        frame = frame*norm_factor
        frame = cv2.convertScaleAbs(frame)
        frame = cv2.blur(frame,(5,5))
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        x,y,sq_size,distrib,distrib_control = pf.next_state(frame)
        p1 = (int(y-sq_size),int(x-sq_size))
        p2 = (int(y+sq_size),int(x+sq_size))

        # before resampling
        for (x2,y2,scale2) in distrib_control:
            x2 = int(x2)
            y2 = int(y2)
            cv2.circle(img, (y2,x2), 1, (255,0,0),thickness=10) 
        # after resampling
        for (x1,y1,scale1) in distrib:
            x1 = int(x1)
            y1 = int(y1)
            cv2.circle(img, (y1,x1), 1, (0,0,255),thickness=10) 


        cv2.rectangle(img,p1,p2,(0,0,255),thickness=5)

        cv2.addWeighted(orig, alpha, img, 1 - alpha,0, img)   
        create_legend(img,(40,40),(40,20))

        cv2.imshow('frame',img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break          

    cap.release()
    cv2.destroyAllWindows()
Project: SLIC_cityscapes    Author: wpqmanu
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    # grayscale image
    if len(colorsrc.shape)==2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)

        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)

        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)

        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

        return grad

    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc=colorsrc[:,:,index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total=gradx_total+gradx

            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady

            grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)

        return grad
Project: SLIC_cityscapes    Author: wpqmanu
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    # grayscale image
    if len(colorsrc.shape)==2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)

        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)

        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)

        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

        return grad

    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc=colorsrc[:,:,index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total=gradx_total+gradx

            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady

            grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)

        return grad
Project: SLIC_cityscapes    Author: wpqmanu
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    # grayscale image
    if len(colorsrc.shape)==2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)

        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)

        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)

        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

        return grad

    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc=colorsrc[:,:,index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total=gradx_total+gradx

            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady

            grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)

        return grad
Project: SLIC_cityscapes    Author: wpqmanu
def gradient_img(colorsrc):
    '''
        http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
    '''
    SCALE = 1
    DELTA = 0
    DDEPTH = cv2.CV_16S  ## to avoid overflow

    # grayscale image
    if len(colorsrc.shape)==2:
        graysrc = cv2.GaussianBlur(colorsrc, (3, 3), 0)

        ## gradient X ##
        gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
        gradx = cv2.convertScaleAbs(gradx)

        ## gradient Y ##
        grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
        grady = cv2.convertScaleAbs(grady)

        grad = cv2.addWeighted(gradx, 0.5, grady, 0.5, 0)

        return grad

    # multi-channel image
    else:
        gradx_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        grady_total = np.zeros((colorsrc.shape[0], colorsrc.shape[1]))
        for index in range(colorsrc.shape[2]):
            graysrc=colorsrc[:,:,index]
            graysrc = cv2.GaussianBlur(graysrc, (3, 3), 0)

            ## gradient X ##
            gradx = cv2.Sobel(graysrc, DDEPTH, 1, 0, ksize=3, scale=SCALE, delta=DELTA)
            gradx = cv2.convertScaleAbs(gradx)
            gradx_total=gradx_total+gradx

            ## gradient Y ##
            grady = cv2.Sobel(graysrc, DDEPTH, 0, 1, ksize=3, scale=SCALE, delta=DELTA)
            grady = cv2.convertScaleAbs(grady)
            grady_total = grady_total + grady

            grad = cv2.addWeighted(gradx_total, 0.5, grady_total, 0.5, 0)

        return grad
Project: indices    Author: shekharshank
def detect_barcode(imageval):


    # load the image and convert it to grayscale

    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)

    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction
    gradX = cv2.Sobel(gray, ddepth = cv2.cv.CV_32F, dx = 1, dy = 0, ksize = -1)
    gradY = cv2.Sobel(gray, ddepth = cv2.cv.CV_32F, dx = 0, dy = 1, ksize = -1)

    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations = 4)
    closed = cv2.dilate(closed, None, iterations = 4)

    # find the contours in the thresholded image, then sort the contours
    # by their area, keeping only the largest one
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key = cv2.contourArea, reverse = True)[0]

    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.cv.BoxPoints(rect))

    # draw a bounding box arounded the detected barcode and display the
    # image
    cv2.drawContours(img_data_ndarray, [box], -1, (0, 255, 0), 3)
    # cv2.imshow("Image", image)
    #cv2.imwrite("uploads/output-"+ datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")  +".jpg",image)
    # cv2.waitKey(0)

    #outputfile = "uploads/output-" + time.strftime("%H:%M:%S") + ".jpg"
    outputfile = "uploads/output.jpg"

    cv2.imwrite(outputfile,img_data_ndarray)