Python cv2 module: equalizeHist() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.equalizeHist().
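
Before the project examples, here is a minimal, self-contained sketch of the basic call. cv2.equalizeHist() expects an 8-bit, single-channel image and returns an equalized copy; the file names below are placeholders, not taken from any of the projects that follow.

import cv2

# Minimal usage sketch; "input.jpg" and "output.jpg" are placeholder paths.
gray = cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE)
if gray is None:
    raise IOError("could not read input.jpg")

equalized = cv2.equalizeHist(gray)  # 8-bit single-channel in, 8-bit single-channel out
cv2.imwrite("output.jpg", equalized)

For color images, most of the examples below either equalize each channel separately or convert to a luminance/value channel first and equalize only that.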

Project: sql_python_deep_learning    Author: Azure    | Project source | File source
def manipulate_images(sample_image):
    batch = []
    cnt = 0
    dx = 40
    ds = 512
    for i in range(0, sample_image.shape[0] - 3, 3):
        tmp = []
        for j in range(3):
            img = sample_image[i + j]
            img = 255.0 / np.amax(img) * img
            img = cv2.equalizeHist(img.astype(np.uint8))
            img = img[dx: ds - dx, dx: ds - dx]
            img = cv2.resize(img, (224, 224))
            tmp.append(img)

        batch.append(tmp)
    batch = np.array(batch, dtype=np.float32)
    return batch
Project: Predicting-First-Impressions    Author: mel-2445    | Project source | File source
def histogramEqualization(originalImage, space):
    from cv2 import equalizeHist
    equalized = equalizeHist(originalImage)
    return equalized
Project: bib-tagger    Author: KateRita    | Project source | File source
def find_bib(image):
  height, width, depth = image.shape  # image.shape is (rows, cols, channels)

  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
  #gray = cv2.equalizeHist(gray)
  blurred = cv2.GaussianBlur(gray,(5,5),0)

  debug_output("find_bib_blurred", blurred)
  #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0);
  ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU);
  #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY);
  debug_output("find_bib_binary", binary)
  threshold_contours,hierarchy = find_contours(binary)

  debug_output("find_bib_threshold", binary)

  edges = cv2.Canny(gray,175,200, 3)
  edge_contours,hierarchy = find_contours(edges)

  debug_output("find_bib_edges", edges)

  contours = threshold_contours + edge_contours
  debug_output_contours("find_bib_threshold_contours", image, contours)

  rectangles = get_rectangles(contours)

  debug_output_contours("find_bib_rectangles", image, rectangles)

  potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width*height)]

  debug_output_contours("find_bib_potential_bibs", image, potential_bibs)

  ideal_aspect_ratio = 1.0
  potential_bibs = sorted(potential_bibs, key = lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))

  return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0,0)],[(0,0)],[(0,0)],[(0,0)]])

#
# Checks that the size and aspect ratio of the contour are appropriate for a bib.
#
Project: dream2016_dm    Author: lishen    | Project source | File source
def sweep_img_patches(img, patch_size, stride, target_scale=None, 
                      equalize_hist=False):
    nb_row = round(float(img.shape[0] - patch_size)/stride + .49)
    nb_col = round(float(img.shape[1] - patch_size)/stride + .49)
    nb_row = int(nb_row)
    nb_col = int(nb_col)
    sweep_hei = patch_size + (nb_row - 1)*stride
    sweep_wid = patch_size + (nb_col - 1)*stride
    y_gap = int((img.shape[0] - sweep_hei)/2)
    x_gap = int((img.shape[1] - sweep_wid)/2)
    patch_list = []
    for y in xrange(y_gap, y_gap + nb_row*stride, stride):
        for x in xrange(x_gap, x_gap + nb_col*stride, stride):
            patch = img[y:y+patch_size, x:x+patch_size].copy()
            if target_scale is not None:
                patch_max = patch.max() if patch.max() != 0 else target_scale
                patch *= target_scale/patch_max
            if equalize_hist:
                patch = cv2.equalizeHist(patch.astype('uint8'))
            patch_list.append(patch.astype('float32'))
    return np.stack(patch_list), nb_row, nb_col
Project: Fingerprint-Recognition    Author: zhangzimou    | Project source | File source
def enhance(img,blockSize=8,boxSize=4):
    """image enhancement
    return: enhanced image
    """
#    img=cv2.equalizeHist(np.uint8(img))
    img,imgfore=segmentation(img)
#    img=blockproc(np.uint8(img),cv2.equalizeHist,(16,16))
    img=img.copy(order='C').astype(np.float64)
    theta=_pre.calcDirectionBox(img,blockSize,boxSize)
    wl=calcWlBox(img,blockSize,boxSize)
    sigma=5
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)
    img=_pre.GaborFilterBox(img,blockSize,boxSize,wl,np.pi/2-theta,sigma)

    img=np.asarray(img)
    imgfore=cv2.erode(imgfore,np.ones((8,8)),iterations=4)
    img[np.where(imgfore==0)]=255
    img=basic.truncate(img,method='default')

    return img,imgfore
Project: ssd_keras    Author: pierluigiferrari    | Project source | File source
def histogram_eq(image):
    '''
    Perform histogram equalization on the input image.

    See https://en.wikipedia.org/wiki/Histogram_equalization.
    '''

    image1 = np.copy(image)

    image1 = cv2.cvtColor(image1, cv2.COLOR_RGB2HSV)

    image1[:,:,2] = cv2.equalizeHist(image1[:,:,2])

    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)

    return image1
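
A hedged usage sketch for histogram_eq() above: the function assumes an RGB uint8 array and equalizes only the value channel of its HSV representation, so an image read with cv2.imread (BGR) needs converting first. The file name is a placeholder and histogram_eq is assumed to be in scope.

import cv2

# Usage sketch for histogram_eq(); "sample.jpg" is a placeholder path.
bgr = cv2.imread("sample.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
rgb_eq = histogram_eq(rgb)  # equalizes only the V channel in HSV space
cv2.imwrite("sample_eq.jpg", cv2.cvtColor(rgb_eq, cv2.COLOR_RGB2BGR))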
Project: PixivAvatarBot    Author: kophy    | Project source | File source
def detect_faces(image):
    """
    Detect faces in the given image.
    :param image: input image (OpenCV format)
    :return: list of detected face rectangles
    """
    gray = None;
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);
    except:
        gray = image;
    gray = cv2.equalizeHist(gray);
    faces = detector.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 5, minSize = (face_width, face_width));
    return faces;
Project: Rapider    Author: yazdipour    | Project source | File source
def ocr():
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:,:,2]
    # img = cv2.equalizeHist(img)
    index=0
    for tmp in templates:
        res = cv2.matchTemplate(img,tmp,cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        ix,iy=max_loc[0]//pw,max_loc[1]//ph  # integer division so ix, iy can index txtbox
        strx=txtbox[iy][ix].get()
        index=index+1
        txtbox[iy][ix].insert(len(strx),str(index))
    return
Project: tensorflow-pi    Author: karaage0703    | Project source | File source
def equalizeHistRGB(src):

    # cv2.split on a BGR image yields [Blue, Green, Red]
    RGB = list(cv2.split(src))
    for i in range(3):
        # equalizeHist returns a new image; the result must be kept
        RGB[i] = cv2.equalizeHist(RGB[i])

    img_hist = cv2.merge([RGB[0], RGB[1], RGB[2]])
    return img_hist

Project: luna16    Author: gzuidhof    | Project source | File source
def histogram_equalization(images, adaptive=True):

    _images = np.array(images * 255, dtype = np.uint8)

    pool = ThreadPool(4)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    def process_image(image):
        #print image.shape, image.dtype
        image = image.transpose(1,2,0)

        if adaptive:
            image[:,:,0] = clahe.apply(image[:,:,0])
            image[:,:,1] = clahe.apply(image[:,:,1])
            image[:,:,2] = clahe.apply(image[:,:,2])
        else:
            image[:,:,0] = cv2.equalizeHist(image[:,:,0])
            image[:,:,1] = cv2.equalizeHist(image[:,:,1])
            image[:,:,2] = cv2.equalizeHist(image[:,:,2])

        image = image.transpose(2,0,1)
        return image

    equalized = pool.map(process_image, _images)
    equalized = np.array(equalized, dtype=np.float32)/255.

    #visualize_data(np.append(images[:8],equalized[:8],axis=0).transpose(0,2,3,1))
    return equalized
Project: deeplearning-cats-dogs-tutorial    Author: adilmoujahid    | Project source | File source
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):

    #Histogram Equalization
    img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
    img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
    img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])

    #Image Resizing
    img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)

    return img
Project: deeplearning-cats-dogs-tutorial    Author: adilmoujahid    | Project source | File source
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):

    #Histogram Equalization
    img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
    img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
    img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])

    #Image Resizing
    img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)

    return img
Project: deeplearning-cats-dogs-tutorial    Author: adilmoujahid    | Project source | File source
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):

    #Histogram Equalization
    img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
    img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
    img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])

    #Image Resizing
    img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)

    return img
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def process_image(self, cv_image, header, tag):
        """ process the image """
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # mask for color range
        if self.color_range:
            mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
            count = cv2.countNonZero(mask)
            if count:
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.dilate(mask, kernel, iterations=2)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

                for i, c in enumerate(contours):
                    x, y, w, h = cv2.boundingRect(c)
                    if self.prefix is not None:
                        name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                           tag,
                                                           header.seq, i)
                        print name
                        roi = cv_image[y:y+h, x:x+w]
                        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                        gray = cv2.equalizeHist(gray)
                        cv2.imwrite(name, gray)

                for c in contours:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
            elif self.prefix is not None:
                name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                        header.seq, )
                cv2.imwrite(name, cv_image)

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
Project: sherlock-hack    Author: StuartIanNaylor    | Project source | File source
def doTask(self, tstamp):
        """Run object detection."""
        result = list()
        try:
            image = images[tstamp]

            height, width = image.shape[:2]
            gray = cv2.resize(image, (int(width/3), int(height/3)))
            gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)


            #size = np.shape(image)[:2]
            rects = self._classifier.detectMultiScale(
                gray,
                scaleFactor=1.2,
                minNeighbors=3,
                minSize=(20,20),
                flags=cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT | cv2.cv.CV_HAAR_DO_CANNY_PRUNING | cv2.cv.CV_HAAR_DO_ROUGH_SEARCH

                )
            if len(rects):
                for a,b,c,d in rects:
                    result.append((a,b,c,d, self._color))
        except:
            print('Error in detector !!!')
        return result

# Monitor framerates for the given seconds past.
Project: imgProcessor    Author: radjkarl    | Project source | File source
def equalizeImage(img, save_path=None, name_additive='_eqHist'):
    '''
    Equalize the histogram (contrast) of an image
    works with RGB/multi-channel images
    and flat-arrays

    @param img  - image_path or np.array
    @param save_path if given output images will be saved there
    @param name_additive if given this additive will be appended to output images

    @return output images if input images are numpy.arrays and no save_path is given
    @return None otherwise
    '''

    if isinstance(img, string_types):
        img = PathStr(img)
        if not img.exists():
            raise Exception("image path doesn't exist")
        img_name = img.basename().replace('.', '%s.' % name_additive)
        if save_path is None:
            save_path = img.dirname()
        img = cv2.imread(img)

    if img.dtype != np.dtype('uint8'):
        # openCV cannot work with float arrays or uint > 8bit
        eqFn = _equalizeHistogram
    else:
        eqFn = cv2.equalizeHist
    if len(img.shape) == 3:  # multi channel img like rgb
        for i in range(img.shape[2]):
            img[:, :, i] = eqFn(img[:, :, i])
    else:  # grey scale image
        img = eqFn(img)
    if save_path:
        img_name = PathStr(save_path).join(img_name)
        cv2.imwrite(img_name, img)
    return img
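
The function above falls back to a private _equalizeHistogram for images that are not 8-bit, because cv2.equalizeHist only accepts uint8 single-channel data. That helper is not shown in this excerpt; the sketch below is only a rough NumPy approximation of such a fallback under that assumption, not imgProcessor's actual implementation.

import numpy as np

def _equalize_histogram_sketch(img, nbins=65536):
    # Rough sketch only, NOT imgProcessor's _equalizeHistogram: equalize an
    # image of arbitrary numeric dtype by mapping pixel values through the
    # normalized cumulative histogram, then rescaling to the original range.
    hist, bin_edges = np.histogram(img.ravel(), bins=nbins)
    cdf = hist.cumsum().astype(np.float64)
    cdf /= cdf[-1]                                    # normalize the CDF to [0, 1]
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.0
    out = np.interp(img.ravel(), bin_centers, cdf).reshape(img.shape)
    return img.min() + out * (img.max() - img.min())  # back to the input value range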
Project: Brain_Tumor_Segmentation    Author: KarthikRevanuru    | Project source | File source
def get_data_id(path):
    sample_image = get_3d_data(path)

    sample_image[sample_image == -2000] = 0


    batch = []
    cnt = 0
    dx = 40
    ds = 512
    for i in range(0, sample_image.shape[0] - 3, 3):
        tmp = []
        for j in range(3):
            img = sample_image[i + j]
            img = 255.0 / np.amax(img) * img
            img = cv2.equalizeHist(img.astype(np.uint8))
            img = img[dx: ds - dx, dx: ds - dx]
            img = cv2.resize(img, (224, 224))
            tmp.append(img)

        tmp = np.array(tmp)
        batch.append(np.array(tmp))


    batch = np.array(batch)
    return batch
Project: Brain_Tumor_Segmentation    Author: KarthikRevanuru    | Project source | File source
def get_data_id(path):
    sample_image = get_3d_data(path)

    sample_image[sample_image == -2000] = 0


    batch = []
    cnt = 0
    dx = 40
    ds = 512
    for i in range(0, sample_image.shape[0] - 3, 3):
        tmp = []
        for j in range(3):
            img = sample_image[i + j]
            img = 255.0 / np.amax(img) * img
            img = cv2.equalizeHist(img.astype(np.uint8))
            img = img[dx: ds - dx, dx: ds - dx]
            img = cv2.resize(img, (224, 224))
            tmp.append(img)

        tmp = np.array(tmp)
        batch.append(np.array(tmp))


    batch = np.array(batch)
    return batch
Project: tefla    Author: openAGI    | Project source | File source
def histo_equalized(img):
    assert (img.shape[0] == 1)  # expects a single-channel image
    img_equalized = cv2.equalizeHist(
        np.array(img, dtype=np.uint8))
    return img_equalized
Project: cnn-traffic-light-evaluation    Author: takeitallsource    | Project source | File source
def histogram_equalization(image):
    image = cv2.cvtColor(image, COLOR_SPACE)
    x, y, z = cv2.split(image)

    if INTENSITY_COMPONENT == 1:
        x = cv2.equalizeHist(x)
    elif INTENSITY_COMPONENT == 2:
        y = cv2.equalizeHist(y)
    elif INTENSITY_COMPONENT == 3:
        z = cv2.equalizeHist(z)

    return cv2.cvtColor(cv2.merge((x, y, z)), INVERSE_COLOR_SPACE)
Project: cnn-traffic-light-evaluation    Author: takeitallsource    | Project source | File source
def histogram_equalization(image):
    return cv2.equalizeHist(image)
Project: huaat_ml_dl    Author: ieee820    | Project source | File source
def get_data_id(path):
    sample_image = get_3d_data(path)
    sample_image[sample_image == -2000] = 0
    # f, plots = plt.subplots(4, 5, sharex='col', sharey='row', figsize=(10, 8))

    batch = []
    cnt = 0
    dx = 40
    ds = 512
    for i in range(0, sample_image.shape[0] - 3, 3):
        tmp = []
        for j in range(3):
            img = sample_image[i + j]
            img = 255.0 / np.amax(img) * img
            img = cv2.equalizeHist(img.astype(np.uint8))
            img = img[dx: ds - dx, dx: ds - dx]
            img = cv2.resize(img, (224, 224))
            tmp.append(img)

        tmp = np.array(tmp)
        batch.append(np.array(tmp))

        # if cnt < 20:
        #     plots[cnt // 5, cnt % 5].axis('off')
        #     plots[cnt // 5, cnt % 5].imshow(np.swapaxes(tmp, 0, 2))
        # cnt += 1

    # plt.show()
    batch = np.array(batch)
    return batch
Project: Vehicle-identification    Author: soloice    | Project source | File source
def equalize_hist_all(self, root='../data/val/'):
        raw_root, out_root = root + 'images/', root + 'normalized/'
        if not os.path.exists(out_root):
            os.mkdir(out_root)
        cnt = 0
        for parent, _, files in os.walk(raw_root):
            for name in files:
                img = cv2.imread(parent + name)
                b, g, r = cv2.split(img)
                bb, gg, rr = cv2.equalizeHist(b), cv2.equalizeHist(g), cv2.equalizeHist(r)
                [row, col] = b.shape

                if row > col:
                    d = row - col
                    add_block = np.zeros((d, row))
                    new_bb = np.vstack((bb.T, add_block))
                    new_gg = np.vstack((gg.T, add_block))
                    new_rr = np.vstack((rr.T, add_block))
                    new_bb = new_bb.T
                    new_gg = new_gg.T
                    new_rr = new_rr.T
                else:
                    d = col - row
                    add_block = np.zeros((d, col))
                    new_bb = np.vstack((add_block, bb))
                    new_gg = np.vstack((add_block, gg))
                    new_rr = np.vstack((add_block, rr))

                new_bb, new_gg, new_rr = np.uint8(new_bb), np.uint8(new_gg), np.uint8(new_rr)
                new_image = cv2.merge([new_bb, new_gg, new_rr])

                res = cv2.resize(new_image, (100, 100), interpolation=cv2.INTER_CUBIC)
                new_name = out_root + name
                cv2.imwrite(new_name, res)
                cnt += 1
                if cnt % 500 == 0:
                    print 'Processed', cnt, 'images!'
Project: Fingerprint-Recognition    Author: zhangzimou    | Project source | File source
def segmentation(img, blockSize=8, h=352, w=288):
    add0=(16-img.shape[0]%16)/2
    add1=(16-img.shape[1]%16)/2
    img=np.vstack((  255*np.ones((add0,img.shape[1])), img, 255*np.ones((add0,img.shape[1]))  ))
    img=np.hstack((  255*np.ones((img.shape[0],add1)), img, 255*np.ones((img.shape[0],add1))  ))
#    img=np.uint8(img)
    ## reference: IMPROVED FINGERPRINT IMAGE SEGMENTATION USING NEW MODIFIED GRADIENT
    #               BASED TECHNIQUE
    sobel_x=np.array([[1, 0, -1],[2, 0, -2],[1, 0, -1]])
    sobel_y=np.array([[1, 2, 1],[0, 0, 0],[-1,-2,-1]])
    par_x=convolve2d(img,sobel_x,mode='same')
    par_y=convolve2d(img,sobel_y,mode='same')
    #img=basic.blockproc(img,cv2.equalizeHist,(blockSize,blockSize))
    stdx=blockproc(par_x,np.std,(16,16),True)
    stdy=blockproc(par_y,np.std,(16,16),True)
    grddev=stdx+stdy
    threshold=90
    index=grddev[1:-1,1:-1].copy()
    index[np.where(index<threshold)]=0
    index[np.where(index>=threshold)]=1
    a=np.zeros(grddev.shape)
    a[1:-1,1:-1]=index
    index=a

    valid=np.zeros(img.shape)
    valid_b=block_view(valid,(16,16))
    valid_b[:]=index[:,:,np.newaxis,np.newaxis]

    kernel = np.ones((8,8),np.uint8)
    # first dilate to delete the invalid value inside the fingerprint region
    valid=cv2.dilate(valid,kernel,iterations = 5)
    # then erode more to delete the valid value outside the fingerprint region
    valid=cv2.erode(valid, kernel, iterations = 12)
    # dilate again to increase the valid value area in compensate for the lose
    # due to erosion in the last step
    valid=cv2.dilate(valid, kernel, iterations=7)

    img[np.where(valid==0)]=255
    # align the image    
    #img=align(img, valid)         
    return cut(img, valid, h, w)
Project: Smart-Surveillance-System-using-Raspberry-Pi    Author: OmkarPathak    | Project source | File source
def normalize_intensity(images):
    """ This method normalizes the size and pixel intensity of an image.

    Each image has their own distribution of intensity pixels in grayscale.
    This function normalizes these intensities such that the image uses
    all the range of grayscale values.
    """
    images_norm = []
    for image in images:
        is_color = len(image.shape) == 3
        if is_color:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        images_norm.append(cv2.equalizeHist(image))
    return images_norm
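
A short usage sketch for normalize_intensity() above, assuming the function is in scope; the image paths are placeholders.

import cv2

# Placeholder paths; normalize_intensity() accepts a list of BGR or grayscale images.
images = [cv2.imread(p) for p in ("face_01.png", "face_02.png")]
images = [img for img in images if img is not None]  # drop files that failed to load
normalized = normalize_intensity(images)             # list of equalized grayscale images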
Project: kaggle_dsb    Author: syagev    | Project source | File source
def histogram_equalization(images, adaptive=True):

    _images = np.array(images * 255, dtype = np.uint8)

    pool = ThreadPool(4)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    def process_image(image):
        #print image.shape, image.dtype
        image = image.transpose(1,2,0)

        if adaptive:
            image[:,:,0] = clahe.apply(image[:,:,0])
            image[:,:,1] = clahe.apply(image[:,:,1])
            image[:,:,2] = clahe.apply(image[:,:,2])
        else:
            image[:,:,0] = cv2.equalizeHist(image[:,:,0])
            image[:,:,1] = cv2.equalizeHist(image[:,:,1])
            image[:,:,2] = cv2.equalizeHist(image[:,:,2])

        image = image.transpose(2,0,1)
        return image

    equalized = pool.map(process_image, _images)
    equalized = np.array(equalized, dtype=np.float32)/255.

    #visualize_data(np.append(images[:8],equalized[:8],axis=0).transpose(0,2,3,1))
    return equalized
Project: anime-face-collector    Author: uehara1414    | Project source | File source
def get_faces(img_path, cascade_file = "lbpcascade_animeface.xml"):
    cascade = cv2.CascadeClassifier(cascade_file)
    image = cv2.imread(img_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    faces = cascade.detectMultiScale(gray,
                                     scaleFactor=1.1,
                                     minNeighbors=5,
                                     minSize=(24, 24))
    return faces
Project: Face_recog_LBPH    Author: vedvasu    | Project source | File source
def detect(self, src):
        if np.ndim(src) == 3:
            src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        src = cv2.equalizeHist(src)
        rects = self.cascade.detectMultiScale(src, scaleFactor=self.scaleFactor, minNeighbors=self.minNeighbors, minSize=self.minSize)
        if len(rects) == 0:
            return np.ndarray((0,))
        rects[:,2:] += rects[:,:2]
        return rects
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | Project source | File source
def preprocess(img):
    '''Image preprocessing: grayscale, median blur, histogram equalization'''
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    #img=cv2.GaussianBlur(img,(3,3),0)
    img=cv2.medianBlur(img,5)
    img=cv2.equalizeHist(img)
    return img
Project: Girl-s-Camera    Author: SHANEGU56    | Project source | File source
def face_detector1(image, cascade):
    global face_num1  # global face counter
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    equalImage = cv2.equalizeHist(grayImage)  # histogram equalization
    faces = cascade.detectMultiScale(equalImage, scaleFactor=1.3, minNeighbors=3)

    for (x,y,w,h) in faces:
        # save the detected face region to disk and draw a bounding box on the original image
        cv2.imwrite("/Users/gushixin/Desktop/OwnerSensor/faceOnly/other/other_%s.png" %(face_num1), image[y:y+h, x:x+w])
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        face_num1 = face_num1 + 1
    return image
Project: Girl-s-Camera    Author: SHANEGU56    | Project source | File source
def face_detector0(image, cascade):
    global face_num0  # global face counter
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    equalImage = cv2.equalizeHist(grayImage)  # histogram equalization
    faces = cascade.detectMultiScale(equalImage, scaleFactor=1.3, minNeighbors=3)

    for (x,y,w,h) in faces:
        # save the detected face region to disk and draw a bounding box on the original image
        cv2.imwrite("/Users/gushixin/Desktop/OwnerSensor/faceOnly/owner/owner_%s.png" %(face_num0), image[y:y+h, x:x+w])
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        face_num0 = face_num0 + 1
    return image
Project: Girl-s-Camera    Author: SHANEGU56    | Project source | File source
def face_detector(image, cascade):
    global face_num  # global face counter
    grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    equalImage = cv2.equalizeHist(grayImage)  # histogram equalization
    faces = cascade.detectMultiScale(equalImage, scaleFactor=1.3, minNeighbors=3)

    for (x,y,w,h) in faces:
        # save the detected face region to disk
        cv2.imwrite("/Users/gushixin/Desktop/OwnerSensor/faceOnly/owner/self_%s.png" %(face_num), image[y:y+h, x:x+w])
        #cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
        face_num = face_num + 1
    return image
Project: CNN    Author: Celia-xy    | Project source | File source
def load(test=False, cols=None):
    """Loads data from FTEST if *test* is True, otherwise from FTRAIN.
    Pass a list of *cols* if you're only interested in a subset of the
    target columns.
    """
    fname = FTEST if test else FTRAIN
    df = read_csv(os.path.expanduser(fname))  # load pandas dataframe

    # The Image column has pixel values separated by space; convert
    # the values to numpy arrays:
    df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))

    if cols:  # get a subset of columns
        df = df[list(cols) + ['Image']]

    # print(df.count())  # prints the number of values for each column
    df = df.dropna()  # drop all rows that have missing values in them


    X_0 = np.vstack(df['Image'].values)
    X = np.zeros(X_0.shape)
    for i in range(X_0.shape[0]):
        im = X_0[i]
        im = im.reshape(96, 96)
        im = im.astype(np.uint8)
        eq = cv2.equalizeHist(im)
        eq = eq.reshape(1, 96*96)
        X[i] = eq
    X = X/255.  # scale pixel values to [0, 1]
    X = X.astype(np.float32)

    if not test:  # only FTRAIN has any target columns
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale target coordinates to [-1, 1]
        X, y = shuffle(X, y, random_state=42)  # shuffle train data
        y = y.astype(np.float32)
    else:
        y = None

    return X, y
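
A hedged usage sketch for load() above: FTRAIN and FTEST are module-level CSV paths defined elsewhere in the project (not shown in this excerpt), and the CSV is assumed to follow the facial-keypoints layout this snippet targets, with a space-separated 'Image' column.

# Usage sketch only; assumes FTRAIN / FTEST and the imports used by load() are defined.
X_train, y_train = load(test=False)   # equalized 96x96 images flattened to rows, targets in [-1, 1]
X_test, _ = load(test=True)           # FTEST has no target columns, so y is None
print(X_train.shape, y_train.shape, X_test.shape)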
Project: Defect-Prediction    Author: Jorba123    | Project source | File source
def equalize_image_channel(channel):
    """ Histogram equalization of a single image channel."""

    if channel.ndim == 3:  # the input still has multiple color channels
        raise AttributeError("More than one color channel.")
    return cv.equalizeHist(channel)
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def histo_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = cv2.equalizeHist(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def histo_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = cv2.equalizeHist(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def histo_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = cv2.equalizeHist(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def histo_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = cv2.equalizeHist(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized
Project: Kaggle-DSB    Author: Wrosinski    | Project source | File source
def histo_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = cv2.equalizeHist(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized
Project: keras-anime-face-recognition    Author: namakemono    | Project source | File source
def detect(filename, cascade_file = "lbpcascade_animeface.xml"):
    if not os.path.isfile(cascade_file):
        raise RuntimeError("%s: not found" % cascade_file)
    cascade = cv2.CascadeClassifier(cascade_file)
    image = cv2.imread(filename)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    faces = cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 5, minSize = (24, 24))
    return image, faces
Project: deepvisualminer    Author: pathbreak    | Project source | File source
def scale(orig_top_dir, scaled_dest_dir, width, height, make_grayscale = True, equalize_hist = False):

    if not os.path.exists(scaled_dest_dir):
        os.makedirs(scaled_dest_dir)

    for label in os.listdir(orig_top_dir):
        label_dir = os.path.join(orig_top_dir, label)
        dest_label_dir = os.path.join(scaled_dest_dir, label)

        if not os.path.exists(dest_label_dir):
            os.mkdir(dest_label_dir)

        for imgfilename in os.listdir(label_dir):
            orig_imgfilepath = os.path.join(label_dir, imgfilename)
            print(orig_imgfilepath)

            img = cv2.imread(orig_imgfilepath)

            if make_grayscale:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                if equalize_hist:
                    print("Equalizing")
                    img = cv2.equalizeHist(img)

            elif equalize_hist:
                print("Warning: Invalid arguments. Histogram equalization can be done only if grayscale is enabled. Ignoring")

            img = cv2.resize(img, (width, height))

            dest_imgfilepath = os.path.join(dest_label_dir, imgfilename)

            cv2.imwrite(dest_imgfilepath, img)

            print(orig_imgfilepath,' -> ', dest_imgfilepath)
Project: deepvisualminer    Author: pathbreak    | Project source | File source
def recognize(img_file, expected_label, models_dir, eigen=True, fischer=True, lbp=True, equalize_hist=False):

    eigen_label = fischer_label = lbp_label = -1

    with open(os.path.join(models_dir, 'model.json'), 'r') as model_file:
        model = json.load(model_file)
        train_img_size = (model['height'], model['width'])

    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    # If training images were equalized, better to perform the same 
    # operation during recognition too.
    if equalize_hist:
        img = cv2.equalizeHist(img)

    if img.shape != train_img_size:
        img = cv2.resize( img, train_img_size[::-1] )

    if eigen:
        eigen_recog = face.createEigenFaceRecognizer();
        eigen_recog.load(os.path.join(models_dir, 'eigen.yml'))
        eigen_label = eigen_recog.predict(img)
        print('Eigen done')

    if fischer:
        fischer_recog = face.createFisherFaceRecognizer();
        fischer_recog.load(os.path.join(models_dir, 'fischer.yml'))
        fischer_label = fischer_recog.predict(img)
        print('Fischer done')

    if lbp:
        lbp_recog = face.createLBPHFaceRecognizer();
        lbp_recog.load(os.path.join(models_dir, 'lbp.yml'))
        lbp_label = lbp_recog.predict(img)
        print('LBP done')


    print(eigen_label, fischer_label, lbp_label)
    return  eigen_label, fischer_label, lbp_label
Project: Cerebrum    Author: tyler-cromwell    | Project source | File source
def preprocess(frame, width, height, x, y, w, h):
    """
    Preprocesses an image for Face Recognition
    """
    cropped = frame[y: y+h, x: x+w]
    grayed = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(grayed, (width, height))
    equalized = cv2.equalizeHist(resized)
    filtered = cv2.bilateralFilter(equalized, 5, 60, 60)
    return filtered
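
A usage sketch for preprocess() above, assuming a Haar cascade supplies the (x, y, w, h) face box; the cascade path, frame path, and 100x100 output size are placeholders.

import cv2

# Placeholder cascade and frame paths; preprocess() is assumed to be in scope.
cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
frame = cv2.imread("frame.png")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5):
    face = preprocess(frame, 100, 100, x, y, w, h)  # cropped, equalized, filtered face chip
    cv2.imwrite("face_%d_%d.png" % (x, y), face)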
Project: Notes2ppt    Author: gsengupta2810    | Project source | File source
def hist_equalise(img):
  eq=cv2.equalizeHist(img)
  return eq
# *********************************************************

# ****************** Cluster image ************************

# *********************************************************
Project: PolarNavigatorServer    Author: zhouyuhangnju    | Project source | File source
def enhancingImage(filename, folder = 'modisProcessing/MODIS/tiff/'):
    print filename

    dataset = gdal.Open(folder+filename+'.tif', GA_Update)
    band = dataset.GetRasterBand(1)
    picarray = band.ReadAsArray()
    # print picarray

    x_size = dataset.RasterXSize # Raster xsize
    y_size = dataset.RasterYSize # Raster ysize
    picarray = homofilter(picarray, x_size, y_size)
    picarray = cv2.equalizeHist(picarray)
    # picarray = unifyGrayCenter(picarray)

    band.WriteArray(picarray)
Project: PolarNavigatorServer    Author: zhouyuhangnju    | Project source | File source
def enhancingImage(filename, folder = 'modisProcessing/MODIS/tiff/'):
    print filename

    dataset = gdal.Open(folder+filename+'.tif', GA_Update)
    band = dataset.GetRasterBand(1)
    picarray = band.ReadAsArray()
    # print picarray

    x_size = dataset.RasterXSize # Raster xsize
    y_size = dataset.RasterYSize # Raster ysize
    picarray = homofilter(picarray, x_size, y_size)
    picarray = cv2.equalizeHist(picarray)
    # picarray = unifyGrayCenter(picarray)

    band.WriteArray(picarray)
Project: PolarNavigatorServer    Author: zhouyuhangnju    | Project source | File source
def enhancingImage(filename, folder = 'modisProcessing/MODIS/tiff/'):
    print filename

    dataset = gdal.Open(folder+filename+'.tif', GA_Update)
    band = dataset.GetRasterBand(1)
    picarray = band.ReadAsArray()
    # print picarray

    x_size = dataset.RasterXSize # Raster xsize
    y_size = dataset.RasterYSize # Raster ysize
    picarray = homofilter(picarray, x_size, y_size)
    picarray = cv2.equalizeHist(picarray)
    # picarray = unifyGrayCenter(picarray)

    band.WriteArray(picarray)
Project: blog    Author: benhoff    | Project source | File source
def detect_faces(self, image: np.ndarray):
        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        return faces
Project: blog    Author: benhoff    | Project source | File source
def detect_faces(self, image: np.ndarray):
        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        return faces
Project: Home-Security    Author: gaborvecsei    | Project source | File source
def detect(self, image):
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)
        blurred = cv2.GaussianBlur(gray_image, self.kernel, self.sigma)

        if self.prevImage is None:
            self.prevImage = blurred
        diff = cv2.absdiff(self.prevImage, blurred)
        _, binary = cv2.threshold(diff, 21, 255, cv2.THRESH_BINARY)

        if eval(cv2.__version__.split('.')[0]) == 3:
            _, cnts, hier = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            cnts, hier = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

        if len(cnts) < 1:
            is_detected = False
            contour = None
        else:
            largest_contour = cnts[0]
            if cv2.contourArea(largest_contour) < self.min_detection_area:
                is_detected = False
                contour = None
            else:
                is_detected = True
                contour = largest_contour
                self.prevImage = blurred
        return is_detected, contour