Python cv2 module, CAP_PROP_POS_MSEC example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use cv2.CAP_PROP_POS_MSEC.
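
Before the project examples, here is a minimal sketch of the two typical operations: seeking with VideoCapture.set and reading the current position with VideoCapture.get. The file name example.mp4 is a placeholder.

import cv2

cap = cv2.VideoCapture("example.mp4")  # "example.mp4" is a placeholder path

# Seek to the 5-second mark; CAP_PROP_POS_MSEC is expressed in milliseconds.
cap.set(cv2.CAP_PROP_POS_MSEC, 5000)

ok, frame = cap.read()
if ok:
    # Current position of the capture, in milliseconds.
    print(cap.get(cv2.CAP_PROP_POS_MSEC))

cap.release()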

Project: SBB4-damage-tracker    Author: whorn    | Project source | File source
def createTrainingData(filename, time_start, time_stop):
    vidcap = cv2.VideoCapture(filename)
    try:
        os.makedirs("trainingdata_" + filename)
    except OSError:
        pass
    os.chdir("trainingdata_" + filename)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    for time in range(time_start, time_stop):
        # Seek to the current second (CAP_PROP_POS_MSEC expects milliseconds).
        vidcap.set(cv2.CAP_PROP_POS_MSEC, time * 1000)
        success, image = vidcap.read()
        if not success:
            # Requested position is past the end of the video.
            break
        image = cv2.medianBlur(image, 7)
        resized = imutils.resize(image, width=800)
        # Crop the two fixed regions of interest and keep their edge maps.
        p1 = resized[370:430, 220:300]
        p2 = resized[370:430, 520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        cv2.imwrite('p1_' + str(time) + ".png", p1)
        cv2.imwrite('p2_' + str(time) + ".png", p2)
    os.chdir("..")
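A hypothetical call of the function above; the file name and time window are placeholders, and the hard-coded crop coordinates assume the project's own video layout.

# Extract cropped edge maps for seconds 10-60 of "match.mp4"; the function
# writes them into (and temporarily changes into) a "trainingdata_match.mp4" directory.
createTrainingData("match.mp4", 10, 60)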
Project: youtube-8m    Author: google    | Project source | File source
def frame_iterator(filename, every_ms=1000, max_num_frames=300):
  """Uses OpenCV to iterate over all frames of filename at a given frequency.

  Args:
    filename: Path to video file (e.g. mp4).
    every_ms: The duration (in milliseconds) to skip between frames.
    max_num_frames: Maximum number of frames to process, taken from the
      beginning of the video.

  Yields:
    BGR frame (OpenCV's default channel order) with shape
    (image height, image width, channels).
  """
  video_capture = cv2.VideoCapture()
  if not video_capture.open(filename):
    print('Error: Cannot open video file ' + filename, file=sys.stderr)
    return
  last_ts = -99999  # The timestamp of the last retrieved frame.
  num_retrieved = 0

  while num_retrieved < max_num_frames:
    # Skip frames until at least every_ms has elapsed since the last
    # retrieved frame.
    while video_capture.get(cv2.CAP_PROP_POS_MSEC) < every_ms + last_ts:
      if not video_capture.read()[0]:
        return

    last_ts = video_capture.get(cv2.CAP_PROP_POS_MSEC)
    has_frames, frame = video_capture.read()
    if not has_frames:
      break
    yield frame
    num_retrieved += 1
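A hypothetical usage sketch for the iterator above; clip.mp4 is a placeholder path.

# Sample roughly one frame every 2 seconds, up to 100 frames in total.
count = 0
for frame in frame_iterator("clip.mp4", every_ms=2000, max_num_frames=100):
    count += 1
print('retrieved %d frames' % count)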
Project: learning-blind-motion-deblurring    Author: cgtuebingen    | Project source | File source
def jump(self, frame=None, ms=None):
        assert (frame is None) != (ms is None), "Use either frame or ms, not both!"
        if frame is not None:
            if frame >= self.frames:
                raise ReadFrameException('Cannot jump to frame (frame does not exist)')
            self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame)
        if ms is not None:
            self.vid.set(cv2.CAP_PROP_POS_MSEC, ms)
        # print("jumped to frame %i" % self.vid.get(cv2.CAP_PROP_POS_FRAMES))
Project: mlAlgorithms    Author: gu-yan    | Project source | File source
def video2image():
    cap = cv2.VideoCapture(args.videopath)

    i = 0
    name = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or frame is None:
            break
        time = cap.get(cv2.CAP_PROP_POS_MSEC)
        index = cap.get(cv2.CAP_PROP_POS_FRAMES)
        print('frames: %d   ---   times: %f' % (index, time / 1000))
        i += 1
        if args.videofps <= 0:
            # Save every frame.
            cv2.imwrite(os.path.join(args.imagepath, str(name)) + '.jpg', frame)
            name += 1
            print('(height: %d, width: %d, channel: %d)' % frame.shape)
        else:
            # Save every args.videofps-th frame.
            if i == args.videofps:
                # cv2.imshow('frame', frame)
                # k = cv2.waitKey(20)
                # k = cv2.waitKey(0)
                i = 0
                cv2.imwrite(os.path.join(args.imagepath, str(name)) + '.jpg', frame)
                name += 1
                print('(height: %d, width: %d, channel: %d)' % frame.shape)

    cap.release()
    cv2.destroyAllWindows()
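The snippet above reads a global args namespace; a hypothetical argument setup is sketched below, using the attribute names the function actually accesses (videopath, imagepath, videofps).

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--videopath', default='input.mp4')   # placeholder input video
parser.add_argument('--imagepath', default='frames')      # output directory (must exist)
parser.add_argument('--videofps', type=int, default=0)    # <= 0 saves every frame
args = parser.parse_args()

video2image()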
Project: trackingtermites    Author: dmrib    | Project source | File source
def update_termites(self):
        """Update termites positions.

        Args:
            None.
        Returns:
            None.
        """
        for termite in self.termites:
            found, termite.position = termite.tracker.update(self.video_source.current_frame)
            if not found:
                print('Lost termite no.{}'.format(termite.identity))
                self.video_source.pause()
            termite.path.append([int(termite.position[0]), int(termite.position[1]),
                                self.video_source.source.get(cv2.CAP_PROP_POS_MSEC)])
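Illustrative only, assuming termite is one of the tracked objects from the loop above: each appended path entry is [x, y, timestamp_ms], so elapsed tracking time can be computed directly from the stored CAP_PROP_POS_MSEC values.

x_first, y_first, t_first = termite.path[0]
x_last, y_last, t_last = termite.path[-1]
print('tracked for %.2f seconds' % ((t_last - t_first) / 1000.0))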
Project: SBB4-damage-tracker    Author: whorn    | Project source | File source
def videoToImageArray(filename, time_start, time_stop):
    vidcap = cv2.VideoCapture(filename)
    pictures = [[], []]
    for time in range(time_start, time_stop):
        # Seek to the current second (CAP_PROP_POS_MSEC expects milliseconds).
        vidcap.set(cv2.CAP_PROP_POS_MSEC, time * 1000)
        success, image = vidcap.read()
        if not success:
            # Requested position is past the end of the video.
            break
        image = cv2.medianBlur(image, 7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430, 220:300]
        p2 = resized[370:430, 520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        pictures[0].append(p1)
        pictures[1].append(p2)
    return pictures
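A hypothetical call of the function above; the file name and time window are placeholders.

# Edge maps for each second between 10 s and 20 s; p1_list[i] pairs with p2_list[i].
p1_list, p2_list = videoToImageArray("match.mp4", 10, 20)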
Project: F1-Telemetry    Author: MrPranz    | Project source | File source
def vidocr_to_csv(video, vcoords, tbcoords, f1app=True):
    # Inputs:
    #   video    = path to the video file, as a string
    #   vcoords  = pixel coordinates [top-left x, top-left y, bottom-right x, bottom-right y] of the velocity readout
    #   tbcoords = pixel coordinates [top-left x, top-left y, bottom-right x, bottom-right y] of the throttle/brake readout
    #   f1app    = boolean, default True; use True for video from the F1 app, False for onboard video
    # Output: a .csv file (same name as the video file) with one row per extracted parameter.

    # capture video via opencv
    vid = cv2.VideoCapture(video)
    s,frm = vid.read()

    v_all = []
    t_all = []
    thr_all = []
    brk_all = []

    step = 1
    total_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
    print(total_frames)
    i = int(total_frames * 0)  # start at the beginning of the video
    vid.set(cv2.CAP_PROP_POS_MSEC, i)  # property id 0 is CAP_PROP_POS_MSEC

    # go through each frame and extract data
    while s:
        if i >= int(total_frames):
            break
        s,frm = vid.read()
        if i%step == 0 or i == total_frames-1:
            v_temp = velocity_ocr(frm,vcoords,f1app)
            t_temp = vid.get(cv2.CAP_PROP_POS_MSEC)/1e3
            v_all += [v_temp]
            # thr_temp = throttle_ocr(frm,tbcoords)
            # brk_temp = brake_ocr(frm,tbcoords)
            # thr_all += [thr_temp]
            # brk_all += [brk_temp]
        if i%200 == 0:
            print(v_temp,t_temp,i)
        i += 1

    t_all = get_timestamps(video)
    # save data to .csv with same filename as videofile
    with open(video[0:-4]+'.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(v_all)
        writer.writerow(t_all)
        # writer.writerow(thr_all)
        # writer.writerow(brk_all)
        writer.writerow([])
        writer.writerow([])

    print(video,"completed.")
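A hypothetical call of the function above; the coordinate boxes are placeholders that would have to match the overlay layout of the specific recording, and velocity_ocr and get_timestamps are assumed to be defined elsewhere in the project.

vidocr_to_csv('race_onboard.mp4',
              vcoords=[100, 50, 180, 90],     # velocity readout box
              tbcoords=[100, 100, 180, 140],  # throttle/brake box
              f1app=False)                    # onboard footage, not the F1 app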