Python cv2 module, CV_8U example source code

We extracted the following 12 code examples from open-source Python projects to illustrate how to use cv2.CV_8U.
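cv2.CV_8U is OpenCV's depth flag for unsigned 8-bit (0-255) output. In the examples below it appears either as the ddepth argument of derivative filters such as cv2.Sobel and cv2.Laplacian, or as the dtype argument of cv2.normalize when rescaling an arbitrary image into a displayable 8-bit range. The following is a minimal, self-contained sketch of both usages; the synthetic input image is only an illustration and is not taken from any of the projects listed here.

import cv2
import numpy as np

# Synthetic horizontal gradient stands in for a real photo (illustration only).
img = np.tile(np.arange(256, dtype=np.uint8), (64, 1))

# ddepth=cv2.CV_8U: the Sobel response is saturated to 0..255, so negative
# gradients become 0; cv2.CV_64F keeps the signed values for comparison.
sobel_u8 = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
sobel_f64 = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)

# dtype=cv2.CV_8U: rescale a float map into a uint8 image, as several of the
# examples below do before imwrite/imshow.
float_map = np.random.rand(64, 256).astype(np.float32)
as_u8 = cv2.normalize(float_map, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

print(sobel_u8.dtype, sobel_f64.dtype, as_u8.dtype)  # uint8 float64 uint8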

Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def process(img):
    img=cv2.medianBlur(img,5)
    kernel=np.ones((3,3),np.uint8)

    #img=cv2.erode(img,kernel,iterations = 1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations = 1)
    erosion = cv2.erode(dilation, element1, iterations = 1)
    dilation2 = cv2.dilate(erosion, element2,iterations = 3)
    #img=cv2.dilate(img,kernel,iterations = 1)
    #img=cv2.Canny(img,100,200)
    return dilation2
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def logoDetect(img,imgo):
    '''Locate the logo region above the license plate and crop it from the original image.'''
    imglogo=imgo.copy()
    img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img=cv2.resize(img,(2*img.shape[1],2*img.shape[0]),interpolation=cv2.INTER_CUBIC)
    #img=cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,-3)
    ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #img=cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize = 9)
    img=cv2.Canny(img,100,200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2,iterations = 1)
    img = cv2.erode(img, element1, iterations = 3)
    img = cv2.dilate(img, element2,iterations = 3)

    # find contours
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema=0
    result=[]
    for con in contours:
        x,y,w,h=cv2.boundingRect(con)
        area=w*h
        ratio=max(w/h,h/w)
        if area>300 and area<20000 and ratio<2:
            if area>tema:
                tema=area
                result=[x,y,w,h]
                ratio2=ratio
    # map the box back to the original image (the ROI was upscaled 2x) and offset it by the plate position
    logo2_X=[int(result[0]/2+plate[0]-3),int(result[0]/2+plate[0]+result[2]/2+3)]
    logo2_Y=[int(result[1]/2+max(0,plate[1]-plate[3]*3.0)-3),int(result[1]/2+max(0,plate[1]-plate[3]*3.0)+result[3]/2)+3]
    cv2.rectangle(img,(result[0],result[1]),(result[0]+result[2],result[1]+result[3]),(255,0,0),2)
    cv2.rectangle(imgo,(logo2_X[0],logo2_Y[0]),(logo2_X[1],logo2_Y[1]),(0,0,255),2)
    print(tema, ratio2, result)
    logo2=imglogo[logo2_Y[0]:logo2_Y[1],logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg',logo2)

    return img
Project: ObjectDetection    Author: PhilippParis    | project source | file source
def create_cascade_neg_data():
    img = cv2.imread(FLAGS.negatives_spritesheet)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    height, width, _ = img.shape

    c = 0
    txt = ""
    for y in range(0, height, FLAGS.image_size):
        for x in range(0, width, FLAGS.image_size):
            cv2.imwrite(FLAGS.output_dir + "/negatives/" + str(c) + ".png", img[y:y+FLAGS.image_size, x:x+FLAGS.image_size])
            txt += "negatives/" + str(c) + ".png" + "\n"
            c += 1

    with open(FLAGS.output_dir + "/negatives.info", 'w') as file:
        file.write(txt)        

    return c

# ========================================== #
Project: pybot    Author: spillai    | project source | file source
def im_normalize(im, lo=0, hi=255, dtype='uint8'):
    return cv2.normalize(im, None, alpha=lo, beta=hi, norm_type=cv2.NORM_MINMAX,
                         dtype={'uint8': cv2.CV_8U,
                                'float32': cv2.CV_32F,
                                'float64': cv2.CV_64F}[dtype])
Project: pybot    Author: spillai    | project source | file source
def sobel(im, dx=1, dy=1, blur=3): 
    if blur is None or blur == 0: 
        blur_im = im
    else: 
        blur_im = cv2.GaussianBlur(im, (blur,blur), 0)
    return cv2.Sobel(blur_im, cv2.CV_8U, dx, dy)
Project: thesis_scripts    Author: PhilippKopp    | project source | file source
def isomap_playground():
    isomaps =[]
    for i in range(len(isomap_paths)):
        isomaps.append(cv2.imread(isomap_paths[i], cv2.IMREAD_UNCHANGED))

    old_isomap_merged = np.zeros([ISOMAP_SIZE, ISOMAP_SIZE, 4], dtype='uint8')

    all_isomaps_merged = merge(isomaps)
    show_isomap('all_isomaps_merged', all_isomaps_merged)
    #cv2.waitKey()
    #cv2.destroyAllWindows()
    #exit()

    for i in range(len(isomaps)):
        new_isomap_merged = merge([old_isomap_merged, isomaps[i]])
        #blurryness = cv2.Laplacian(isomaps[i], cv2.CV_64F).var()
        blurryness_map = cv2.Laplacian(isomaps[i], cv2.CV_64F)
        blurryness_map[np.logical_or(blurryness_map<-700, blurryness_map>700)]=0 #try to filter out the edges
        blurryness = blurryness_map.var()
        #show_isomap('laplac',cv2.Laplacian(isomaps[i], cv2.CV_8U))
        #print ('max', np.max(cv2.Laplacian(isomaps[i], cv2.CV_64F)), 'min', np.min(cv2.Laplacian(isomaps[i], cv2.CV_64F)))
        coverage = calc_isomap_coverage(isomaps[i])
        print(isomap_paths[i]," isomap coverage:",coverage,"blur detection:",blurryness, "overall score", coverage*coverage*blurryness)
        show_isomap('new isomap', isomaps[i])
        show_isomap('merge', new_isomap_merged)
        cv2.waitKey()

        old_isomap_merged = new_isomap_merged


    #cv2.imwrite('/user/HS204/m09113/Desktop/merge_test.png', isomap_merged)

    #cv2.waitKey()
    #cv2.destroyAllWindows()
Project: Vehicle-Logo-Recognition    Author: xinyuexy    | project source | file source
def process(img):
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    gau=cv2.GaussianBlur(gray,(5,5),0)
    ret,thre = cv2.threshold(gau, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    med=cv2.medianBlur(thre,5)
    canny=cv2.Canny(thre,100,200)
    #sobel = cv2.Sobel(thre, cv2.CV_8U, 1, 0, ksize = 3)
    dilation=cv2.dilate(canny,element2,iterations = 1)
    dst=cv2.erode(dilation, element1, iterations = 1)
    return dst
Project: ObjectDetection    Author: PhilippParis    | project source | file source
def create_mask(model, x, keep_prob, src):
    """
    object detection via sliding windows
    Args:
        model: tensorflow model which is used for detection
        x: input data placeholder
        keep_prob: keep probability placeholder (dropout)
        src: image to apply the detection
    Returns:
        image mask scaled between 0 and 255 
    """

    global sess
    height, width = src.shape
    mask = np.zeros((height,width), np.float32)
    input_size = (FLAGS.input_size, FLAGS.input_size)
    min_window_size = (FLAGS.min_window_size, FLAGS.min_window_size)
    max_window_size = (FLAGS.max_window_size, FLAGS.max_window_size)

    for windows, coords in utils.slidingWindow(src, FLAGS.step_size, input_size, FLAGS.scale_factor, min_window_size, max_window_size):
        feed = {x:windows, keep_prob:1.0}
        out = sess.run(model, feed_dict = feed)

        for i in range(0, len(out)):
            out_scaled = cv2.resize(np.reshape(out[i], [FLAGS.label_size,FLAGS.label_size]), 
                                    coords[i].size(), interpolation=cv2.INTER_CUBIC)

            mask[coords[i].y : coords[i].y2(), coords[i].x : coords[i].x2()] += out_scaled

    # image processing
    mask = cv2.normalize(mask, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    #  cv2.imwrite(FLAGS.output_dir + FLAGS.test + '_mask_' + str(FLAGS.step_size) + '_' + str(datetime.datetime.now()) + '.png', mask)
    return mask


# ============================================================= #
Project: DrosophilaCooperative    Author: avaccari    | project source | file source
def trackObjects(self):
        for area in self.trackedAreasList:
            # Template matching
            gray = cv2.cvtColor(self.processedFrame, cv2.COLOR_BGR2GRAY)
            templ = area.getGrayStackAve()
            cc = cv2.matchTemplate(gray, templ, cv2.TM_CCOEFF_NORMED)
            cc = cc * cc * cc * cc
            _, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
            cc8 = cv2.normalize(cc, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
            mask = np.zeros_like(cc8)

            # Search match within template region
            mcorn = area.getEnlargedCorners(0) # If not 0, enlarge the search
            cv2.rectangle(mask, mcorn[0], mcorn[1], 255, -1)
            _, _, _, mx = cv2.minMaxLoc(cc8, mask)

#            kp = area.getKalmanPredict()
#            area.updateWindow(kp)
#            area.setTemplate(self.processedFrame)

            # Prevent large spatial jumps
            (c, r, _, _) = area.getcrwh()
            jump = 10
            if abs(c - mx[0]) < jump and abs(r - mx[1]) < jump:
#                area.setKalmanCorrect(mx)
                area.updateWindow(mx)
            else:
#                area.setKalmanCorrect((c, r))
                area.updateWindow((c, r))
            area.setTemplate(self.processedFrame)

            # Show the template stack
            if self.showTemplate is True:
                cv2.imshow('Stack: '+str(area), area.getStack())
            else:
                try:
                    cv2.destroyWindow('Stack: '+str(area))
                except:
                    pass

            # Show the matching results
            if self.showMatch is True:
                cv2.rectangle(cc8, mcorn[0], mcorn[1], 255, 1)
                cv2.circle(cc8, mx, 5, 255, 1)
                cv2.imshow('Match: '+str(area), cc8)
            else:
                try:
                    cv2.destroyWindow('Match: '+str(area))
                except:
                    pass

            # Draw the tracked area on the image
            corn = area.getCorners()
            cv2.rectangle(self.workingFrame,
                          corn[0], corn[1],
                          (0, 255, 0), 1)

#            self.showFrame()
#            raw_input('wait')
Project: cnn-traffic-light-evaluation    Author: takeitallsource    | project source | file source
def contrast_normalization(image):
    blurred = cv2.GaussianBlur(image, (3,3), 0)
    return cv2.Laplacian(blurred, cv2.CV_8U, ksize=3)
Project: ObjectDetection    Author: PhilippParis    | project source | file source
def main(_):
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"

    # --------- load classifier ------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)
    model, x, keep_prob = get_nn_classifier()

    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')

    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    delta = [-2, -1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 0.99, 0.995, 0.999, 0.9995, 0.9999]

    start = time.time()
    candidates = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size,FLAGS.max_window_size))
    detected = nn_classification(candidates, img, model, x, keep_prob, delta)
    elapsed = (time.time() - start)  

    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#

    ground_truth_data = utils.get_ground_truth_data(csv_path)

    for j in range(0, len(delta)):
        detected[j] = [Rect(x, y, w, h) for (x,y,w,h) in detected[j]]
        tp, fn, fp = utils.evaluate(ground_truth_data, detected[j])

        # ----------------output ----------------#
        # image output
        """
        img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        for (x,y,w,h) in detected[j]:
            cv2.rectangle(img_out, (x-w/2,y-h/2),(x+w/2,y+h/2), [0,255,0], 3)

        for c in ground_truth_data:
            cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)

        output_file = "out" + '_' + str(datetime.datetime.now())
        cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
        """
        # csv output
        with open(FLAGS.output_dir + FLAGS.out + '.csv', 'a', newline='') as file:
            writer = csv.writer(file, delimiter=',')
            writer.writerow([FLAGS.test, str(elapsed), str(len(ground_truth_data)), delta[j], FLAGS.minNeighbors, FLAGS.scaleFactor, 
                            str(len(detected[j])), str(tp), str(fp), str(fn)])
Project: ObjectDetection    Author: PhilippParis    | project source | file source
def main():
    image_path = FLAGS.test
    csv_path = os.path.splitext(image_path)[0] + ".csv"

    # ------------ load classifier ---------- #
    cascade = cv2.CascadeClassifier(FLAGS.cascade_xml)

    # -------------- open image --------------#
    img = utils.getImage(image_path)
    img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)

    # ---------- object detection ------------#    
    print('starting detection of ' + FLAGS.test + '...')

    start = time.time()
    detected = cascade.detectMultiScale(img, scaleFactor=FLAGS.scaleFactor, minNeighbors=FLAGS.minNeighbors, maxSize=(FLAGS.max_window_size, FLAGS.max_window_size))
    elapsed = (time.time() - start)
    print('detection time: %d' % elapsed)

    # ------------- evaluation --------------#
    detected = [Rect(x, y, w, h) for (x,y,w,h) in detected]
    ground_truth_data = utils.get_ground_truth_data(csv_path)

    tp, fn, fp = utils.evaluate(ground_truth_data, detected)

    # ----------------output ----------------#
    # image output
    """
    img_out = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    for c in ground_truth_data:
        cv2.circle(img_out, (c[0], c[1]), 3, [0,0,255],3)

    for r in detected:
        cv2.rectangle(img_out, (r.x, r.y), (r.x2(), r.y2()), [0,255,0], 2)

    output_file = "out" + '_' + str(datetime.datetime.now())
    cv2.imwrite(FLAGS.output_dir + output_file + '.png', img_out)
    """
    # csv output
    with open(FLAGS.output_dir + 'results.csv', 'a', newline='') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow([FLAGS.test, str(elapsed),str(len(ground_truth_data)), str(FLAGS.scaleFactor), 
                         str(FLAGS.minNeighbors), str(len(detected)), str(tp), str(fp), str(fn)])