Python cv2 module: getTickCount() example source code

The following 18 code examples, extracted from open-source Python projects, illustrate how to use cv2.getTickCount().
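
cv2.getTickCount() returns the number of clock ticks elapsed since a reference event (such as machine startup), and cv2.getTickFrequency() returns the number of ticks per second; dividing a tick difference by the frequency therefore yields elapsed time in seconds. Before the project excerpts, here is a minimal timing sketch (the work being timed is a placeholder):

import cv2

t_start = cv2.getTickCount()
# ... code to be timed goes here ...
elapsed = (cv2.getTickCount() - t_start) / cv2.getTickFrequency()
print("elapsed: %.3f s" % elapsed)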

Project: OpenCV2    Author: SarathM1    | Project source | File source
def lipSegment(self, img):
        # self.t1 = cv2.getTickCount()
        lipHull = self.dlib_obj.get_landmarks(img)
        cv2.drawContours(img, lipHull, -1, (255, 0, 0), 2)
        (x, y), (MA, ma), angle = cv2.fitEllipse(lipHull)
        a = ma/2
        b = MA/2

        eccentricity = sqrt(pow(a, 2)-pow(b, 2))
        eccentricity = round(eccentricity/a, 2)

        cv2.putText(img, 'E = '+str(round(eccentricity, 3)), (10, 350),
                    self.font, 1, (255, 0, 0), 1)

        if eccentricity < 0.9:
            self.flags.cmd = 'b'
        else:
            self.flags.cmd = 'f'

        if angle < 80:
            self.flags.cmd = 'l'
        elif angle > 100:
            self.flags.cmd = 'r'

        cv2.putText(img, 'Cmd = ' + self.flags.cmd, (10, 300),  self.font,  1,
                    (0, 0, 255), 1, 16)
        # self.t2 = cv2.getTickCount()
        # print "Time = ", (self.t2-self.t1)/cv2.getTickFrequency()
        return img
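
The commented-out lines above show the same getTickCount() timing pattern; enabled from a hypothetical caller (and with the Python 2 print rewritten as a function call), they would read:

t1 = cv2.getTickCount()
out = obj.lipSegment(frame)  # obj and frame are assumed names for the detector instance and input image
t2 = cv2.getTickCount()
print("Time = %f s" % ((t2 - t1) / cv2.getTickFrequency()))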
Project: AtmosphericCorrection    Author: y-iikura    | Project source | File source
def mk_list(jmax,imax,tmx,inc,dem,sang):
    f_list=[]
    t_setx=t_set[:,0,0] 
    #r_setx=t_set[:,0,0]
    r_setx=np.array([0.0,0.2,0.4,0.6,0.8,1.0])
    nlen=len(t_setx)
    s_setx=smin*np.ones(nlen)
    t=cv2.getTickCount() 
    for j in range(jmax):
        temp=[]
        for i in range(imax):
            frefx=fref(tmx[j,i],inc[j,i],dem[j,i]/1000.0,t_setx,r_setx,s_setx,sang[j,i])
            temp=temp+[frefx]
        if j % 100 == 0: print j,(cv2.getTickCount()-t)/cv2.getTickFrequency()
        f_list=f_list+[temp]
    return f_list
Project: AtmosphericCorrection    Author: y-iikura    | Project source | File source
def mclass(cls1,cls2,cls3,nmax):
    num=imax*jmax
    cls1x=np.float32(cls1.reshape(num))
    cls2x=np.float32(cls2.reshape(num))
    cls3x=np.float32(cls3.reshape(num))
    data=np.array([[cls1x],[cls2x],[cls3x]])
    data=data.reshape(3,num)
    data=np.transpose(data)
    datax=data[::100,:]
    t=cv2.getTickCount()
    codebook, distortion = scipy.cluster.vq.kmeans(datax, nmax, iter=10, thresh=1e-05)
    print (cv2.getTickCount()-t)/cv2.getTickFrequency()
    t=cv2.getTickCount()
    code, dist = scipy.cluster.vq.vq(data, codebook)
    print (cv2.getTickCount()-t)/cv2.getTickFrequency()
    return code.reshape(jmax,imax)
Project: key-face    Author: gabrielilharco    | Project source | File source
def detectTemplateMatching(self, img):
        self.templateMatchingCurrentTime = cv2.getTickCount()
        duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime)/cv2.getTickFrequency()
        if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
            self.foundFace = False
            self.isTemplateMatchingRunning = False
            return

        faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
        roi = self.getSubRect(img, self.trackedFaceROI)
        match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
        cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)


        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
        foundTemplate = (
            minLoc[0] + self.trackedFaceROI[0],
            minLoc[1] + self.trackedFaceROI[1],
            self.trackedFaceTemplate[2],
            self.trackedFaceTemplate[3])

        self.trackedFaceTemplate = foundTemplate
        self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
        self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
Project: StreamMotionDetection    Author: henry54809    | Project source | File source
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
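
This one-line clock() helper, which recurs in several of the projects below, converts the raw tick count into seconds, so elapsed time is simply the difference of two calls. A small usage sketch:

start = clock()
# ... per-frame processing ...
print("frame took %.3f s" % (clock() - start))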
Project: segmentation-visualization-training    Author: tkwoo    | Project source | File source
def predict_image(flag):
    t_start = cv2.getTickCount()
    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))

    with open(os.path.join(flag.ckpt_dir, flag.ckpt_name, 'model.json'), 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    weight_list = sorted(glob(os.path.join(flag.ckpt_dir, flag.ckpt_name, "weight*")))
    model.load_weights(weight_list[-1])
    print "[*] model load : %s"%weight_list[-1]
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000 
    print "[*] model loading Time: %.3f ms"%t_total

    imgInput = cv2.imread(flag.test_image_path, 0)
    input_data = imgInput.reshape((1,256,256,1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print "Predict Time: %.3f ms"%t_total

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    _, imgMask = cv2.threshold(imgMask, int(255*flag.confidence_value), 255, cv2.THRESH_BINARY)
    imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    # imgZero = np.zeros((256,256), np.uint8)
    # imgMaskColor = cv2.merge((imgZero, imgMask, imgMask))
    imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.3, 0.0)
    output_path = os.path.join(flag.output_dir, os.path.basename(flag.test_image_path))
    cv2.imwrite(output_path, imgShow)
    print "SAVE:[%s]"%output_path
Project: segmentation-visualization-training    Author: tkwoo    | Project source | File source
def train_visualization_seg(self, model, epoch):
        image_name_list = sorted(glob(os.path.join(self.flag.data_path,'train/IMAGE/*/*.png')))
        print image_name_list

        image_name = image_name_list[-1]
        image_size = self.flag.image_size

        imgInput = cv2.imread(image_name, self.flag.color_mode)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_size,image_size,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print "[*] Predict Time: %.3f ms"%t_total

        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.4, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1)
Project: SelfDrivingRCCar    Author: sidroopdaska    | Project source | File source
def retrieve_data_set():
    """Retrieve data from all the .npz files and aggregate it into a
    data set for mlp training"""

    start_time = cv2.getTickCount()

    print("Loading data set...")

    image_array = np.zeros((1, 38400), 'float')
    label_array = np.zeros((1, 4), 'float')

    # Retrieve a list of pathname that matches the below expr
    data_set = glob.glob("data_set/*.npz")

    if not data_set:
        print("No data set in directory, exiting!")
        sys.exit()

    for single_npz in data_set:
        with np.load(single_npz) as data:
            temp_images = data["images"]
            temp_labels = data["labels"]

        image_array = np.vstack((image_array, temp_images))
        label_array = np.vstack((label_array, temp_labels))

    X = np.float32(image_array[1:, :])
    Y = np.float32(label_array[1:, :])
    print("Image array shape: {0}".format(X.shape))
    print("Label array shape: {0}".format(Y.shape))

    end_time = cv2.getTickCount()
    print("Data set load duration: {0}"
          .format((end_time - start_time) // cv2.getTickFrequency()))

    return X, Y
Project: python-opencv2    Author: bunkahle    | Project source | File source
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
Project: AtmosphericCorrection    Author: y-iikura    | Project source | File source
def mk_ref(jmax,imax,f_list,tau,eref):
    ref=np.zeros(imax*jmax).reshape(jmax,imax)
    #t=cv2.getTickCount() 
    for j in range(jmax):
        for i in range(imax):
            fref=f_list[j][i]
            temp=fref(tau[j,i],eref[j,i])
            if temp < 0.0 : temp = 0.0
            if temp > 1.0 : temp = 1.0
            ref[j,i] = temp
        #if j % 100 == 0: print j,(cv2.getTickCount()-t)/cv2.getTickFrequency()
    print np.mean(ref),np.std(ref)
    return ref
Project: AtmosphericCorrection    Author: y-iikura    | Project source | File source
def mk_tau(jmax,imax,f_list,eref,cref):
    taux=np.zeros(imax*jmax).reshape(jmax,imax)
    x=t_set 
    #t=cv2.getTickCount() 
    for j in range(jmax):
        for i in range(imax):
            fref=f_list[j][i]
            res=np.polyfit(x,fref(x,eref[j,i]),3) # 3rd order
            taux[j,i]=iestimate(res,cref[j,i])
        #if j % 100 == 0: print i,(cv2.getTickCount()-t)/cv2.getTickFrequency()
    temp=np.where(np.isnan(taux)==True)
    print np.nanmean(taux),np.nanstd(taux),len(temp[0])
    return taux

#5/25/2016
Project: AtmosphericCorrection    Author: y-iikura    | Project source | File source
def mk_rad(jmax,imax,inc,dem,sang,tau,ref,eref):
    rad=np.zeros(imax*jmax).reshape(jmax,imax)
    #t=cv2.getTickCount() 
    for j in range(jmax):
      rad[j,:] = radiance(ref[j,:],inc[j,:],tau[j,:],dem[j,:]/1000,eref[j,:],sang[j,:])
      #if j % 100 == 0: print j,(cv2.getTickCount()-t)/cv2.getTickFrequency()
    print np.mean(rad),np.std(rad)
    return rad
Project: emojivis    Author: JustinShenk    | Project source | File source
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
Project: OpenCV-Snapchat-DogFilter    Author: sguduguntla    | Project source | File source
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
Project: memegenerator    Author: Huxwell    | Project source | File source
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
Project: key-face    Author: gabrielilharco    | Project source | File source
def __init__(self, haarCascadeFilePath, videoCapture):
        try:
            self.faceCascade = cv2.CascadeClassifier(haarCascadeFilePath)
        except:
            raise Exception("Error creating haar cascade classifier. Are you sure file " + haarCascadeFilePath + " exists?")
        self.videoCapture = videoCapture
        self.foundFace = False
        self.trackedFace = None
        self.trackedFaceROI = None
        self.templateMatchingStartTime = cv2.getTickCount()
        self.templateMatchingCurrentTime = cv2.getTickCount()
        self.isTemplateMatchingRunning = False
        self.img = None
Project: key-face    Author: gabrielilharco    | Project source | File source
def detectCascade(self, img, roiOnly=False):
        if roiOnly:
            searchArea = self.getSubRect(img, self.trackedFaceROI)
        else:
            searchArea = img

        width = searchArea.shape[0]
        faces = self.faceCascade.detectMultiScale(searchArea,
            scaleFactor = settings.cascadeScaleFactor,
            minNeighbors = settings.cascadeMinNeighbors,
            minSize = (int(width*settings.minimumFaceSize), int(width*settings.minimumFaceSize)),
            maxSize = (int(width*settings.maximumFaceSize), int(width*settings.maximumFaceSize)))

        if len(faces) == 0:
            if roiOnly and not self.isTemplateMatchingRunning:
                self.isTemplateMatchingRunning = True
                self.templateMatchingStartTime = cv2.getTickCount()
            elif not roiOnly:
                self.foundFace = False
                self.trackedFace = None
            return

        self.foundFace=True
        # track only the largest face
        self.trackedFace = self.largestFace(faces)
        # adjust face position if necessary
        if roiOnly:
            self.trackedFace[0] += self.trackedFaceROI[0]
            self.trackedFace[1] += self.trackedFaceROI[1]
        self.trackedFaceTemplate = self.scaleRect(self.trackedFace, img, 0.5)
        self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
Project: autonomous_driving    Author: StatueFungus    | Project source | File source
def main():
    print os.getcwd()
    capture = cv2.VideoCapture('../data/road.avi')
    ticks = 0

    img_prep = ImagePreparator()
    vis = Visualizer()

    while capture.isOpened():
        prevTick = ticks
        ticks = cv2.getTickCount()
        t = (ticks - prevTick) / cv2.getTickFrequency()
        fps = int(1 / t)

        retval, image = capture.read()
        height, width, channels = image.shape

        rect = np.array([
            [0, 200],
            [639, 200],
            [639, 359],
            [0, 359]], dtype="float32")

        dst = np.array([
            [0, 0],
            [639, 0],
            [350, 699],
            [298, 699]], dtype="float32")

        # Prepare the image
        warped = img_prep.warp_perspective(image.copy(), rect, dst, (640, 700))
        roi = img_prep.define_roi(warped, 0.6, 0, 0.40)
        gray = img_prep.grayscale(roi)
        blur = img_prep.blur(gray, (5, 5), 0)
        canny = img_prep.edge_detection(blur, 50, 150, 3)

        vis.draw_text(canny, 'FPS: ' + str(fps), 1, (255, 0, 0), (int(width*0.015), int(height*0.15)))
        vis.show(canny)