Python cv2 module: CAP_PROP_FPS example source code

We extracted the following 32 code examples from open-source Python projects to illustrate how to use cv2.CAP_PROP_FPS.
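
Before the project excerpts, here is a minimal, self-contained sketch of the property itself: CAP_PROP_FPS is read with VideoCapture.get() and returns 0.0 when the backend cannot determine the frame rate. The file name below is a placeholder, not a file from any of the projects.

import cv2

# Minimal usage sketch: query the frame rate a capture reports.
cap = cv2.VideoCapture("example.mp4")            # placeholder path
fps = cap.get(cv2.CAP_PROP_FPS)                  # 0.0 if the backend cannot tell
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)

if fps > 0:
    print("%.2f FPS, %.1f s long" % (fps, frame_count / fps))
else:
    print("FPS unknown for this source")
cap.release()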

Project: motorized_zoom_lens    Author: Kurokesu    | project source | file source
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)

    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]

        blur = get_blur(img, 0.05)
        frame["blur"] = blur

        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
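
The grab() loop above is designed to run on a worker thread, feeding a bounded queue that a consumer drains; it relies on module-level running, config, and get_blur() definitions that are not part of this excerpt. A rough wiring sketch, with stand-ins for those names:

import threading, queue

running = True
config = {"1": 0, "2": 0}             # stand-in for the project's shared config
q = queue.Queue()

t = threading.Thread(target=grab, args=(0, q, 1280, 720, 30), daemon=True)
t.start()

frame = q.get()                       # consumer side: block until a frame arrives
print(frame["blur"])
running = False                       # signal the grab loop to exit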
Project: cvcalib    Author: Algomorph    | project source | file source
def __get_video_properties(self):
        self.frame_dims = (int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                           int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)))

        self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        if self.cap.get(cv2.CAP_PROP_MONOCHROME) == 0.0:
            self.n_channels = 3
        else:
            self.n_channels = 1
        self.frame = np.zeros((self.frame_dims[0], self.frame_dims[1], self.n_channels), np.uint8)
        self.previous_frame = np.zeros((self.frame_dims[0], self.frame_dims[1], self.n_channels), np.uint8)
Project: SpaceX    Author: shahar603    | project source | file source
def skip_from_launch(cap, time):
    """
    Move the capture to T+time (time can be negative) and return the frame index.
    :param cap: OpenCV capture
    :param time: delta time from launch to skip to
    :return: index of requested frame
    """
    number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + skip_to_launch(cap)

    number_of_frames = max(number_of_frames, 0)
    number_of_frames = min(number_of_frames, cap.get(cv2.CAP_PROP_FRAME_COUNT))

    cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)

    return number_of_frames
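
A hedged usage sketch: seconds convert to a frame offset via CAP_PROP_FPS, so a negative time lands before launch. The video path is a placeholder, and skip_to_launch() comes from the same project:

cap = cv2.VideoCapture("launch.mp4")      # placeholder path
frame_idx = skip_from_launch(cap, -10)    # position the capture at roughly T-10 s
ret, frame = cap.read()                   # the next read returns the frame at ~T-10 s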
Project: SpaceX    Author: shahar603    | project source | file source
def get_data(cap, file, t0, out):
    dt = 1 / cap.get(cv2.CAP_PROP_FPS)
    time = 0
    prev_vel = 0
    prev_time = 0

    extract.skip_from_launch(cap, t0)

    _, frame = cap.read()
    t0, v0, a0 = extract.extract_telemetry(frame)

    if t0 is not None:
        prev_time = t0
        prev_vel = v0
        time = extract.rtnd(t0 + dt, 3)

    while frame is not None:
        velocity = extract.calc_velocity(frame)
        altitude = extract.calc_altitude(frame)

        if velocity is not None and altitude is not None and \
                check_data(prev_vel, prev_time, velocity, time):

            json_data = data_to_json(time, velocity, altitude)

            if out:
                print(data_to_json(time, velocity, altitude))

            write_to_file(file, json_data)
            prev_vel = velocity
            prev_time = time

        _, frame = cap.read()
        time = extract.rtnd(time + dt, 3)
Project: SpaceX    Author: shahar603    | project source | file source
def main():
    args = set_args()
    file = open(args.destination_path, 'w')
    cap = create_capture(args.capture_path)

    if cap is None or cap.get(cv2.CAP_PROP_FPS) == 0:
        if youtube_url_validation(args.capture_path):
            print("Cannot access video in URL. Please check the URL is a valid YouTube video")
            exit(2)

        print("Cannot access video in file. Please make sure the path to the file is valid")
        exit(3)

    get_data(cap, file, to_float(args.launch_time), args.out)
Project: RFCN-tensorflow    Author: xdever    | project source | file source
def __init__(self, path):
        self.path = path
        self.fps = 30
        self.currName="unknown"

        if os.path.isdir(self.path):
            self.type=self.DIR
            self.files = glob.glob(self.path+'/*.*')
            self.currFile = 0
        elif self.path.split('.')[-1].lower() in ['avi', 'mp4', 'mpeg', 'mov']:
            self.cap = cv2.VideoCapture(self.path)
            self.frameIndex = 0
            self.type = self.VID
            if int(cv2.__version__.split('.')[0]) < 3:
                self.fps = self.cap.get(cv2.cv.CV_CAP_PROP_FPS)
            else:
                self.fps = self.cap.get(cv2.CAP_PROP_FPS)

            if self.fps<1:
                self.fps=1
        elif self.path.split('.')[-1].lower() in ['png','bmp','jpg','jpeg']:
            self.type=self.IMG
            self.fps=0
        else:
            print("Invalid file: "+self.path)
            sys.exit(-1)
Project: Enchain    Author: Zhehua-Hu    | project source | file source
def showVideoInfo(video_path):
    try:
        vhandle = cv2.VideoCapture(video_path)  # for reading videos with Chinese filenames
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        else:
            print("Video could not be read!")
    except Exception:
        print("Error in showVideoInfo")
Project: video-classification    Author: canhnd58    | project source | file source
def split_video(video):
  vidcap = cv2.VideoCapture(video)
  total_frame = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
  fps = int(vidcap.get(cv2.CAP_PROP_FPS))

  for index in range(0, TOTAL_IMAGE):
    if index == 0:
      frame_no = fps * 2 - 1 # the last frame of the 2nd second
    else:
      frame_no = (total_frame / TOTAL_IMAGE) * index - 1
    vidcap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
    success, image = vidcap.read()
    cv2.imwrite("frame%d.jpg" % index, image)
Project: SBB4-damage-tracker    Author: whorn    | project source | file source
def createTrainingData(filename,time_start,time_stop):
    vidcap = cv2.VideoCapture(filename)
    try:
        os.makedirs("trainingdata_"+filename)
    except OSError:
        pass
    os.chdir("trainingdata_"+filename)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    for time in range(time_start,time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC,time*1000)
        success,image = vidcap.read()
        image = cv2.medianBlur(image,7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430,220:300]
        p2 = resized[370:430,520:600]
        p1 = cv2.Canny(p1, 400, 100, 255)
        p2 = cv2.Canny(p2, 400, 100, 255)
        cv2.imwrite('p1_'+str(time)+".png",p1)
        cv2.imwrite('p2_'+str(time)+".png",p2)
    os.chdir("..")
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def run_video_and_refine(video_file_path, split_info):
    """
    Args:
        1. video_file_path: File path to be used for training AVSR.
        2. split_info:      This variable is similar to the one used in the case of audio-only model training.
                            For more details, please see the util.data_preprocessing.find_text_and_time_limits()
                            and util.data_preprocessing.split() functions.

    """
    video_name = video_file_path.split('/')[-1].split(".")[0]

    stream = VideoStream(video_file_path)
    stream.start()

    FPS = stream.stream.get(cv2.CAP_PROP_FPS)

    if round(FPS) != FIXED_VIDEO_FPS:
        print "[WARNING] Ignoring " + video_file_path + " due to incorrect FPS.(Required FPS=30)"
        stream.stop()
        return []

    time_elapsed = 0.00
    time_end = split_info[-1][1][1] 

    # `split_info` is a list of tuples of the form (x, (y, z))
    frame_count = 0

    data = []

    for i, info in enumerate(split_info):
        # `info` is tuple of the form (x, (y, z)), y = split_time_start, z = split_time_end
        # Please refer to util.data_preprocessing.find_text_and_time_limits() for more details.
        while time_elapsed < info[1][0]:
            frame = stream.read()
            frame_count += 1

            time_elapsed = frame_count * (1.0 / FPS)

        # This section of code does actual preprocessing
        all_frames = []

        while time_elapsed <= info[1][1]:
            frame = stream.read()
            frame = resize(frame, IMAGE_WIDTH)
            frame_count += 1
            all_frames.append(frame)

            time_elapsed = frame_count * (1.0 / FPS)

        mouth_regions = validate_frames(all_frames)

        if mouth_regions is not None:
            split_file_name = video_name + str(i).zfill(5)
            data.append((split_file_name, video_name, mouth_regions, info)) 

    stream.stop()
    return data
Project: robotics1project    Author: pchorak    | project source | file source
def initialize(self):
        # Initialize video capture
        self.cap = cv2.VideoCapture(self.ID)

        frameRate = 20.0
        frameWidth = 640
        frameHeight = 480

        if cv2.__version__[0] == "2":
            # OpenCV 2.x property names
            self.cap.set(cv2.cv.CV_CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameHeight)
        else:
            # OpenCV 3.x+ property names
            self.cap.set(cv2.CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)

        self.thresh = 0.4
        self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8)
Project: MyoSEMG    Author: LuffyDai    | project source | file source
def __init__(self, name, ui=myo_emg.Ui_MainWindow(), cap=capture.capture()):
        super(VideoThread, self).__init__()
        self.flag = True
        self.start_flag = False
        self.support_flag = True
        self.name = name
        self.cap = cap
        self.ui = ui
        self.out = None
        self.stop_signal.connect(self.stop_play)
        self.image_siganl.connect(self.saving_video)
        self.start_signal.connect(self.start_capture)
        self.cap.path_signal.connect(self.save_video)
        if self.name == "Video":
            self.videoLabel = ui.Video
            self.camera = cv2.VideoCapture("instruction.mp4")
            self.fps = self.camera.get(cv2.CAP_PROP_FPS)
        elif self.name == "Camera":
            self.videoLabel = ui.Camera
            self.camera = cv2.VideoCapture(camera_port)
Project: AMBR    Author: Algomorph    | project source | file source
def __init__(self, args, main_out_vid_name="foreground"):
        self.mask_writer = None
        super().__init__(args, main_out_vid_name)
        if args.mask_output_video == "":
            args.mask_output_video = args.in_video[:-4] + "_bs_mask.mp4"

        self.mask_writer = cv2.VideoWriter(os.path.join(self.datapath, args.mask_output_video),
                                           cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                                           self.cap.get(cv2.CAP_PROP_FPS),
                                           (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                            int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                                           False)

        self.mask_writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
        self.foreground_writer = self.writer
        self.foreground = None
        self.mask = None
Project: trackingtermites    Author: dmrib    | project source | file source
def fps(self):
        """Original video fps count."""
        return self.source.get(cv2.CAP_PROP_FPS)
Project: slide-transition-detector    Author: brene    | project source | file source
def __init__(self, stream):
        """
        Default Initializer
        :param stream: the video stream from OpenCV
        """
        self.stream = stream
        self.len = stream.get(cv2.CAP_PROP_FRAME_COUNT)
        self.fps = stream.get(cv2.CAP_PROP_FPS)
Project: Cameo    Author: veraposeidon    | project source | file source
def _writevideoframe(self):
        if not self.is_writingvideo:
            return
        if self._videoWriter is None:
            fps = self._capture.get(cv2.CAP_PROP_FPS)
            if fps == 0.0:
                # The FPS reported by the capture is unknown, so use an estimate.
                if self._frameElapsed < 20:
                    # Wait until more frames elapse so that the estimate is more stable.
                    return
                else:
                    fps = self._fpsEstimate
                    # print fps
            size = (int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            self._videoWriter = cv2.VideoWriter(self._videoFilename, self._videoEncoding, fps, size)

        self._videoWriter.write(self._frame)
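
The fallback above depends on an estimate (self._fpsEstimate) maintained elsewhere in the class; CAP_PROP_FPS commonly returns 0.0 for live cameras. A self-contained sketch of one way to produce such an estimate, not taken from the Cameo project:

import time

def estimate_fps(capture, warmup=5, samples=60):
    """Estimate FPS by timing how long a batch of reads takes."""
    for _ in range(warmup):                 # let exposure and buffers settle
        capture.read()
    start = time.time()
    for _ in range(samples):
        capture.read()
    elapsed = time.time() - start
    return samples / elapsed if elapsed > 0 else 0.0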
Project: derplearning    Author: John-Ellis    | project source | file source
def init_camera(self):
        self.video_path = os.path.join(self.recording_path, 'camera_front.mp4')
        self.cap = cv2.VideoCapture(self.video_path)
        self.n_frames = min(len(self.timestamps), int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)))
        self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
        self.frame_id = -1
        self.read()
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | project source | file source
def extract_and_store_visual_features(video_file_path, json_dir, json_name):
    """
        This function extracts visual features from the given video and stores them in a JSON file.
        This function will NOT be used while preparing training/test/dev data for our model.
        This function only runs through `bin/run_exported_model_AVSR.py` script.
    Args:
        1. video_file_path:     File path for the video to be processed.
        2. json_dir:            Dir where JSON file will exist.
        3. json_name:           Name to be given to JSON file.
    """
    # first load all trained models: DLIB's models and Autoencoder.
    load_trained_models()
    load_AE()

    # Start reading video.
    stream = VideoStream(video_file_path)
    stream.start()

    FPS = stream.stream.get(cv2.CAP_PROP_FPS)

    if round(FPS) != FIXED_VIDEO_FPS:
        return False

    mouth_regions = []
    prev_frame_faces = []
    # Keeps record of previous frames and faces found in previous frames. Initially it is empty.

    while not stream.is_empty():
        frame = stream.read()
        frame = resize(frame, IMAGE_WIDTH)
        rects = FACE_DETECTOR_MODEL(frame, 0)

        # rects = FACE_DETECTOR_MODEL(frame, 1)
        # Upsampling the frame once (second argument = 1) helps detect multiple or
        # smaller faces, aiding correct speaker detection, but increases computing time.

        region = crop_suitable_face(rects, frame, prev_frame_faces)
        if region is None:
            # If no proper face region could be detected, we fill normally distributed random values
            mouth_regions.append(np.random.normal(size=32*32))
        else:
            mouth_regions.append(region.reshape(32*32))

        # Update previous frames. Max size of `prev_frame_faces` = 5
        prev_frame_faces.append((rects, frame))
        if len(prev_frame_faces) > 5:
            prev_frame_faces.pop(0)

    mouth_regions = np.array(mouth_regions)

    # Find and store visual features.
    encode_and_store(mouth_regions, json_dir, json_name.split('.')[0])
    AUTO_ENCODER.close()

    return True
Project: detect_ads    Author: ilyaluk    | project source | file source
def process_loop(self):
        cap_sd = cv2.VideoCapture('pipe:%d' % self.pipe_r_sd)
        fps = cap_sd.get(cv2.CAP_PROP_FPS)
        fps = 24  # hard-coded override of the FPS reported for the pipe input

        self.ws.log('pr: opened video')

        det = cut_detector.ContentDetector()
        orb = cv2.ORB_create()

        i = 0
        scene = 0

        while cap_sd.isOpened():
            if self.do_stop:
                break

            ret, frame = cap_sd.read()
            # self.ws.log('pr: read frame', i)

            is_cut = det.process_frame(i, frame)

            kp = orb.detect(frame, None)

            kp, des = orb.compute(frame, kp)

            # img2 = cv2.drawKeypoints(frame, kp, None, color=(0,255,0), flags=0)
            # cv2.imshow('', img2)
            # cv2.waitKey(0)
            # 1/0

            if is_cut:
                self.ws.log('pr: cut at', i)
                preview = 'previews/frame%04d_%d.png' % (scene, i)
                cv2.imwrite(preview, frame)
                self.ws.sendJSON({
                    'scene': scene,
                    'time': frame2time(i, fps),
                    'preview': preview
                })
                scene += 1

            # call to descriptor callback
            self.desc_cb(i, des, is_cut)

            self.processed = i

            i += 1

        cap_sd.release()
Project: ATLeS    Author: liffiton    | project source | file source
def get_video_stats(self):
        assert(self.sourcetype == 'file')

        framecount = self._video.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = self._video.get(cv2.CAP_PROP_FPS)

        return framecount, fps
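
A common follow-up is deriving the clip duration from these two values; a small sketch (the tracker variable is a hypothetical instance of the surrounding class):

framecount, fps = tracker.get_video_stats()
duration_s = framecount / fps if fps > 0 else float("nan")  # guard against fps == 0
print("%.0f frames at %.2f FPS = %.1f s" % (framecount, fps, duration_s))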
Project: AMBR    Author: Algomorph    | project source | file source
def main():
    args = parser.parse_args()

    mask = cv2.imread(args.mask_file, cv2.IMREAD_COLOR)

    cap = cv2.VideoCapture(args.in_video)
    last_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1

    if args.end_with == -1:
        args.end_with = last_frame
    else:
        if args.end_with > last_frame:
            print(
                "Warning: specified end frame ({:d}) is beyond the last video frame ({:d}). Stopping after last frame.".format(
                    args.end_with, last_frame))
            args.end_with = last_frame

    if args.out_video == "":
        args.out_video = args.in_video[:-4] + "_masked.mp4"

    writer = cv2.VideoWriter(args.out_video, cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                             cap.get(cv2.CAP_PROP_FPS),
                             (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))), True)
    writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())

    if args.start_from > 0:
        cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_from)

    total_frame_span = args.end_with - args.start_from
    frame_counter = 0
    if args.frame_count == -1:
        cur_frame_number = args.start_from
        while cur_frame_number < args.end_with:
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / total_frame_span
            update_progress(amount_done)
            cur_frame_number += 1
    else:
        frame_interval = total_frame_span // args.frame_count
        for i_frame in range(args.start_from, args.end_with, frame_interval):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame)
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / args.frame_count
            update_progress(amount_done)


    cap.release()
    writer.release()
    return 0
Project: AMBR    Author: Algomorph    | project source | file source
def __init__(self, args, out_postfix="_out", with_video_output=True):
        self.global_video_offset = 0
        self.flip_video = False
        self.datapath = "./"
        self.__dict__.update(vars(args))
        self.writer = None

        if os.path.exists("settings.yaml"):
            stream = open("settings.yaml", mode='r')
            self.settings = load(stream, Loader=Loader)
            stream.close()
            self.datapath = self.settings['datapath'].replace("<current_user>", getuser())
            print("Processing path: ", self.datapath)
            if 'raw_options' in self.settings:
                raw_options = self.settings['raw_options']
                if self.in_video in raw_options:
                    self.global_video_offset = raw_options[args.in_video]['global_offset']
                    self.flip_video = raw_options[args.in_video]['flip']

        self.cap = None
        self.reload_video()
        print("Processing video file {:s}.".format(self.in_video))

        last_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)

        if self.end_with == -1:
            self.end_with = last_frame
        else:
            if self.end_with > last_frame:
                print(("Warning: specified end frame ({:d}) is beyond the last video frame" +
                       " ({:d}). Stopping after last frame.")
                      .format(self.end_with, last_frame))
                self.end_with = last_frame

        print("Frame range: {:d}--{:d}".format(self.start_from, self.end_with))

        if with_video_output:
            if self.out_video == "":
                self.out_video = args.in_video[:-4] + "_" + out_postfix + ".mp4"

            self.writer = cv2.VideoWriter(os.path.join(self.datapath, self.out_video),
                                          cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                                          self.cap.get(cv2.CAP_PROP_FPS),
                                          (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                           int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                                          True)
            self.writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
        else:
            self.writer = None

        self.frame = None
        self.cur_frame_number = None
Project: news-shot-classification    Author: gshruti95    | project source | file source
def my_detect_scenes_file(path, scene_list, detector_list, stats_writer = None,
                  downscale_factor = 0, frame_skip = 0, quiet_mode = False,
                  perf_update_rate = -1, save_images = False,
                  timecode_list = None):

    cap = cv2.VideoCapture()
    frames_read = -1
    video_fps = -1
    if not timecode_list:
        timecode_list = [0, 0, 0]

    cap.open(path)
    # file_name = os.path.split(path)[1]
    file_name = path
    if not cap.isOpened():
        if not quiet_mode:
            print('[PySceneDetect] FATAL ERROR - could not open video %s.' % 
                path)
        return (video_fps, frames_read)
    elif not quiet_mode:
        print('[PySceneDetect] Parsing video %s...' % file_name)

    video_width  = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    video_fps    = cap.get(cv2.CAP_PROP_FPS)
    if not quiet_mode:
        print('[PySceneDetect] Video Resolution / Framerate: %d x %d / %2.3f FPS' % (
            video_width, video_height, video_fps ))
        if downscale_factor >= 2:
            print('[PySceneDetect] Subsampling Enabled (%dx, Resolution = %d x %d)' % (
                downscale_factor, video_width / downscale_factor, video_height / downscale_factor ))
        print('Verify that the above parameters are correct'
            ' (especially framerate, use --force-fps to correct if required).')

    frames_list = []
    for tc in timecode_list:
        if isinstance(tc, int):
            frames_list.append(tc)
        elif isinstance(tc, float):
            frames_list.append(int(tc * video_fps))
        elif isinstance(tc, list) and len(tc) == 3:
            secs = float(tc[0] * 60 * 60) + float(tc[1] * 60) + float(tc[2])
            frames_list.append(int(secs * video_fps))
        else:
            frames_list.append(0)

    start_frame, end_frame, duration_frames = 0, 0, 0
    if len(frames_list) == 3:
        start_frame, end_frame, duration_frames = frames_list

    frames_read = scenedetect.detect_scenes(cap, scene_list, detector_list, stats_writer,
                                downscale_factor, frame_skip, quiet_mode,
                                perf_update_rate, save_images, file_name,
                                start_frame, end_frame, duration_frames)

    cap.release()
    return (video_fps, frames_read)
Project: calibration    Author: ciechowoj    | project source | file source
def open_capture(name, frame):
    capture = cv2.VideoCapture(name)
    width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = capture.get(cv2.CAP_PROP_FPS)

    capture.set(cv2.CAP_PROP_POS_FRAMES, frame)

    print("Opened ", name, ", resolution ", width, "x", height, ", fps ", fps, flush = True)

    return capture
Project: yolo_light    Author: chrisgundling    | project source | file source
def camera(self, file, SaveVideo):
    if file == 'camera':
        file = 0
    else:
        assert os.path.isfile(file), \
        'file {} does not exist'.format(file)

    camera = cv2.VideoCapture(file)
    self.say('Press [ESC] to quit demo')
    assert camera.isOpened(), \
    'Cannot capture source'

    elapsed = int()
    start = timer()

    cv2.namedWindow('', 0)
    _, frame = camera.read()
    height, width, _ = frame.shape
    cv2.resizeWindow('', width, height)

    if SaveVideo:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if file == 0:
            fps = 1 / self._get_fps(frame)
            if fps < 1:
                fps = 1
        else:
            fps = round(camera.get(cv2.CAP_PROP_FPS))
        videoWriter = cv2.VideoWriter('video.avi', fourcc, fps, (width, height))

    while camera.isOpened():
        _, frame = camera.read()
        if frame is None:
            print ('\nEnd of Video')
            break
        preprocessed = self.framework.preprocess(frame)
        feed_dict = {self.inp: [preprocessed]}
        net_out = self.sess.run(self.out,feed_dict)[0]
        processed = self.framework.postprocess(net_out, frame, False)
        if SaveVideo:
            videoWriter.write(processed)
        cv2.imshow('', processed)
        elapsed += 1
        if elapsed % 5 == 0:
            sys.stdout.write('\r')
            sys.stdout.write('{0:3.3f} FPS'.format(
                elapsed / (timer() - start)))
            sys.stdout.flush()
        choice = cv2.waitKey(1)
        if choice == 27: break

    sys.stdout.write('\n')
    if SaveVideo:
        videoWriter.release()
    camera.release()
    cv2.destroyAllWindows()
Project: DVR-Scan    Author: Breakthrough    | project source | file source
def _load_input_videos(self):
        """ Opens and checks that all input video files are valid, can
        be processed, and have the same resolution and framerate. """
        self.video_resolution = None
        self.video_fps = None
        self.frames_total = 0
        if not len(self.video_paths) > 0:
            return False
        for video_path in self.video_paths:
            cap = cv2.VideoCapture()
            cap.open(video_path)
            video_name = os.path.basename(video_path)
            if not cap.isOpened():
                if not self.suppress_output:
                    print("[DVR-Scan] Error: Couldn't load video %s." % video_name)
                    print("[DVR-Scan] Check that the given file is a valid video"
                          " clip, and ensure all required software dependencies"
                          " are installed and configured properly.")
                cap.release()
                return False
            curr_resolution = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                               int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            curr_framerate = cap.get(cv2.CAP_PROP_FPS)
            self.frames_total += cap.get(cv2.CAP_PROP_FRAME_COUNT)
            cap.release()
            if self.video_resolution is None and self.video_fps is None:
                self.video_resolution = curr_resolution
                self.video_fps = curr_framerate
                if not self.suppress_output:
                    print("[DVR-Scan] Opened video %s (%d x %d at %2.3f FPS)." % (
                        video_name, self.video_resolution[0],
                        self.video_resolution[1], self.video_fps))
            # Check that all other videos specified have the same resolution
            # (we'll assume the framerate is the same if the resolution matches,
            # since the VideoCapture FPS information is not always accurate).
            elif curr_resolution != self.video_resolution:
                if not self.suppress_output:
                    print("[DVR-Scan] Error: Can't append clip %s, video resolution"
                          " does not match the first input file." % video_name)
                return False
            else:
                if not self.suppress_output:
                    print("[DVR-Scan] Appended video %s." % video_name)
        # If we get to this point, all videos have the same parameters.
        return True
Project: nnp    Author: dribnet    | project source | file source
def setup_camera(device_number):
    cam = cv2.VideoCapture(device_number)
    # result1 = cam.set(cv2.CAP_PROP_FRAME_WIDTH,cam_width)
    # result2 = cam.set(cv2.CAP_PROP_FRAME_HEIGHT,cam_height)
    result3 = cam.set(cv2.CAP_PROP_FPS,1)
    return cam
Project: Enchain    Author: Zhehua-Hu    | project source | file source
def videoSlice(video_path, save_path, progressbarsetter=None, save_type="png", img_comp=0, start_idx=1):
    """

    :param video_path:
    :param save_path:
    :param save_type:
    :param img_comp: compression level, default 0.
                     Higher numbers increase the compression level:
                     png [0-9], jpg [0-100]
    :return:
    """

    # For reading videos with Chinese filenames
    vid_handle = cv2.VideoCapture(video_path)
    # vid_handle = cv2.VideoCapture(video_path.encode('utf-8'))
    fps = vid_handle.get(cv2.CAP_PROP_FPS)
    count = vid_handle.get(cv2.CAP_PROP_FRAME_COUNT)
    size = (int(vid_handle.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(vid_handle.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    prefix = os.path.basename(save_path)
    idx = start_idx  # start from 000001.xxx
    cnt_idx = 1

    params = None
    suffix = None
    if save_type.upper() == "JPEG" or save_type.upper() == "JPG":
        img_type = int(cv2.IMWRITE_JPEG_OPTIMIZE)
        suffix = ".jpg"
        params = [img_type, img_comp]
    elif save_type.upper() == "PNG":
        img_type = int(cv2.IMWRITE_PNG_COMPRESSION)
        suffix = ".png"
        params = [img_type, img_comp]
    else:
        print("Do not support %s format!" % save_type)

    while True:
        ret, frame = vid_handle.read()
        if ret:
            cur_progress = cnt_idx/(count/100.0)
            if progressbarsetter is not None:
                progressbarsetter(cur_progress)
            print("Progress %.2f%%" % cur_progress)
            img_name = save_path + "/" + ("%06d" % idx) + suffix
            # print(img_name)
            print(params)
            cv2.imwrite(img_name, frame, params)
            idx += 1
            cnt_idx += 1
        else:
            break
    print("Slicing Done!")
    return count
Project: rekognition-video-utils    Author: awslabs    | project source | file source
def get_frame_rate(video):
    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    if int(major_ver) < 3:
        fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
    else:
        fps = video.get(cv2.CAP_PROP_FPS)
    print("Frames per second using video.get(cv2.CAP_PROP_FPS): {0}".format(fps))

    return fps
Project: nnp    Author: dribnet    | project source | file source
def setup_camera():
    cam = cv2.VideoCapture(0)
    result1 = cam.set(cv2.CAP_PROP_FRAME_WIDTH,720)
    result2 = cam.set(cv2.CAP_PROP_FRAME_HEIGHT,512)
    result3 = cam.set(cv2.CAP_PROP_FPS,1)
    return cam
Project: nnp    Author: dribnet    | project source | file source
def setup_camera(device_number):
    cam = cv2.VideoCapture(device_number)
    result1 = cam.set(cv2.CAP_PROP_FRAME_WIDTH,cam_width)
    result2 = cam.set(cv2.CAP_PROP_FRAME_HEIGHT,cam_height)
    result3 = cam.set(cv2.CAP_PROP_FPS,1)
    return cam
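
Note that VideoCapture.set() only returns a bool, and many camera drivers silently ignore or clamp requests such as a 1 FPS capture rate; reading the properties back shows what was actually applied. A hedged check (device number 0 is an assumption, and setup_camera() relies on the project's cam_width/cam_height globals):

cam = setup_camera(0)
print("requested 1 FPS, driver reports", cam.get(cv2.CAP_PROP_FPS))
print("resolution: %dx%d" % (cam.get(cv2.CAP_PROP_FRAME_WIDTH),
                             cam.get(cv2.CAP_PROP_FRAME_HEIGHT)))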
Project: learning-blind-motion-deblurring    Author: cgtuebingen    | project source | file source
def fps(self):
        return self.vid.get(cv2.CAP_PROP_FPS)