Python cv2 module: CAP_PROP_POS_FRAMES example source code

The following 26 code examples, extracted from open-source Python projects, illustrate how to use cv2.CAP_PROP_POS_FRAMES.
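Before the project examples, here is a minimal sketch of the property in isolation (video.mp4 and the target index are placeholders): CAP_PROP_POS_FRAMES holds the 0-based index of the frame the capture will decode next, so setting it and then calling read() returns exactly that frame. Seek accuracy depends on the backend and codec; with some compressed formats the capture may land on a nearby keyframe rather than the exact index.

import cv2

cap = cv2.VideoCapture("video.mp4")        # placeholder path
target = 120                               # placeholder 0-based frame index

cap.set(cv2.CAP_PROP_POS_FRAMES, target)   # position the decoder
ok, frame = cap.read()                     # returns frame `target`; the position advances to target + 1
if ok:
    print(int(cap.get(cv2.CAP_PROP_POS_FRAMES)))  # prints target + 1
cap.release()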

Project: SpaceX    Author: shahar603
def skip_from_launch(cap, time):
    """
    Move the capture to T+time (time can be negative) and return the frame index.
    :param cap: OpenCV capture
    :param time: delta time from launch to skip to
    :return: index of requested frame
    """
    number_of_frames = int(cap.get(cv2.CAP_PROP_FPS) * time) + skip_to_launch(cap)  # skip_to_launch is defined in the later SpaceX example

    number_of_frames = max(number_of_frames, 0)
    number_of_frames = min(number_of_frames, int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))

    cap.set(cv2.CAP_PROP_POS_FRAMES, number_of_frames)

    return number_of_frames
Project: video-classification    Author: canhnd58
def split_video(video):
  vidcap = cv2.VideoCapture(video)
  total_frame = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
  fps = int(vidcap.get(cv2.CAP_PROP_FPS))

  for index in range(0, TOTAL_IMAGE):  # TOTAL_IMAGE is a module-level constant in the source project
    if index == 0:
      frame_no = fps * 2 - 1  # the last frame of the 2nd second
    else:
      frame_no = (total_frame // TOTAL_IMAGE) * index - 1
    vidcap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
    success, image = vidcap.read()
    if success:
      cv2.imwrite("frame%d.jpg" % index, image)
Project: 3DCNN    Author: bityangke
def video3d(self, filename, color=False, skip=True):
        cap = cv2.VideoCapture(filename)
        nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        if skip:
            # sample self.depth frames spread evenly across the video
            frames = [int(x * nframe / self.depth) for x in range(self.depth)]
        else:
            frames = list(range(self.depth))
        framearray = []

        for i in range(self.depth):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
            ret, frame = cap.read()
            # note: cv2.resize takes (width, height), so this call only matches
            # its apparent intent when self.height == self.width
            frame = cv2.resize(frame, (self.height, self.width))
            if color:
                framearray.append(frame)
            else:
                framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

        cap.release()
        return np.array(framearray)
Project: 360-stabilizer    Author: MateusZitelli
def __init__(self, videoPath, ratio, reprojThresh):
    self.videoPath = videoPath
    self.vidcap = cv2.VideoCapture(videoPath)
    frameCount = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    self.videoSize = (int(self.vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(self.vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # start one sixth of the way into the video
    self.vidcap.set(cv2.CAP_PROP_POS_FRAMES, frameCount // 6)
    self.ratio = ratio
    self.reprojThresh = reprojThresh
    self.isv3 = imutils.is_cv3()
Project: cvcalib    Author: Algomorph
def read_at_pos(self, ix_frame):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, ix_frame)
        self.more_frames_remain, self.frame = self.cap.read()
Project: cvcalib    Author: Algomorph
def read_previous_frame(self):
        """
        For traversing the video backwards.
        """
        # CAP_PROP_POS_FRAMES is the index of the frame that will be decoded next;
        # read() below therefore returns frame cur_frame_ix - 1 and moves the
        # position back up to cur_frame_ix
        cur_frame_ix = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
        if cur_frame_ix == 0:
            self.more_frames_remain = False
            self.frame = None
            return
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, cur_frame_ix - 1)  # @UndefinedVariable
        self.more_frames_remain = True
        self.frame = self.cap.read()[1]
Project: cvcalib    Author: Algomorph
def scroll_to_frame(self, i_frame):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame)
Project: cvcalib    Author: Algomorph
def scroll_to_beginning(self):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0.0)
Project: cvcalib    Author: Algomorph
def scroll_to_end(self):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_count - 1)
Project: learning-blind-motion-deblurring    Author: cgtuebingen
def pos(self):
        return int(self.vid.get(cv2.CAP_PROP_POS_FRAMES))
Project: learning-blind-motion-deblurring    Author: cgtuebingen
def jump(self, frame=None, ms=None):
        assert (frame is None) != (ms is None), "Use either frame or ms, not both!"
        if frame is not None:
            if frame >= self.frames:
                raise ReadFrameException('Cannot jump to frame (frame does not exist)')
            self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame)
        if ms is not None:
            self.vid.set(cv2.CAP_PROP_POS_MSEC, ms)
        # print("jumped to frame %i" % self.vid.get(cv2.CAP_PROP_POS_FRAMES))
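A hypothetical call site for this helper (the wrapper instance name reader is an assumption, not part of the original project):

reader.jump(frame=120)   # seek by 0-based frame index
reader.jump(ms=4000)     # or seek by timestamp in milliseconds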
Project: mlAlgorithms    Author: gu-yan
def video2image():
    cap = cv2.VideoCapture(args.videopath)

    i = 0
    name = 0
    while cap.isOpened():
        ret, frame = cap.read()
        time = cap.get(cv2.CAP_PROP_POS_MSEC)
        index = cap.get(cv2.CAP_PROP_POS_FRAMES)
        print('frames: %d   ---   times: %f' % (index, time/1000))
        if frame is None:
            break
        i += 1
        if args.videofps <= 0:
            cv2.imwrite(os.path.join(args.imagepath, str(name)) + '.jpg', frame)
            name += 1
            print('(height: %d, width: %d, channel: %d)' % frame.shape)
        else:
            if i == args.videofps:
                # cv2.imshow('frame', frame)
                # k = cv2.waitKey(20)
                # k = cv2.waitKey(0)
                i = 0
                cv2.imwrite(os.path.join(args.imagepath, str(name)) + '.jpg', frame)
                name += 1
                print('(height: %d, width: %d, channel: %d)' % frame.shape)

    cap.release()
    cv2.destroyAllWindows()
Project: self-supervision    Author: gustavla
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        # t is assumed to be a fractional position in [0, 1]
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs
Project: trackingtermites    Author: dmrib
@property
def current_frame_number(self):
        """Current frame number in video sequence."""
        return self.source.get(cv2.CAP_PROP_POS_FRAMES)
Project: trackingtermites    Author: dmrib
def previous_frame(self, step_size):
        """Rewind the video for a given number of frames.

        Args:
            step_size (int): number of frames to rewind.
        Returns:
            None.
        """
        target_frame = max(1, self.current_frame_number - step_size)
        self.source.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
Project: slide-transition-detector    Author: brene
def get_frame(self, pos):
        """
        Returns the frame at the given position of the frame sequence
        :param pos: the position of the frame in the sequence
        :return: the frame at the specified position
        """
        assert pos >= 0
        self.stream.set(cv2.CAP_PROP_POS_FRAMES, pos)
        _, frame = self.stream.read()
        self.reader_head = pos + 1
        return frame
Project: AMBR    Author: Algomorph
def go_to_frame(self, i_frame):
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, float(self.global_video_offset + i_frame))
Project: AMBR    Author: Algomorph
def reload_video(self):
        if self.cap is not None:
            self.cap.release()
        self.cap = cv2.VideoCapture(os.path.join(self.datapath, self.in_video))
        if not self.cap.isOpened():
            raise ValueError("Could not open video!")
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.global_video_offset)
Project: derplearning    Author: John-Ellis
def seek(self, frame_id):
        if not self.legal_position(frame_id):
            print("seek failed illegal target:", frame_id)
            return False

        self.update_label(frame_id, self.frame_id, self.marker)

        self.frame_id = frame_id - 1
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_id)
        print("%i %5i %6.3f %6.3f" % (self.frame_id, self.timestamps[self.frame_id],
                                    self.speeds[frame_id], self.steers[frame_id]))
        self.read()
        self.show = True
        return True
Project: derplearning    Author: John-Ellis
def predict(self, config, model_path):
        # Initialize the model output data vectors
        self.m_speeds = np.zeros(self.n_frames, dtype=float)
        self.m_steers = np.zeros(self.n_frames, dtype=float)

        # Open the video config file
        video_config = util.load_config('%s/%s' % (self.recording_path, 'config.yaml'))
        bot = Inferer(video_config=video_config,
                      model_config=config,
                      folder=self.recording_path,
                      model_path=model_path)

        # Move the capture to the start of the video (frame index 0)
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        for i in range(self.n_frames):
            ret, frame = self.cap.read()

            if not ret or frame is None:
                print("read failed at frame", i)

            if i % 1 == 0:  # always true, so the else branch below never runs
                (self.m_speeds[i], 
                 self.m_steers[i],
                 batch) = bot.evaluate(frame, 
                                    self.timestamps[i], 
                                    config, 
                                    model_path)

            else:
                self.m_speeds[i] = self.m_speeds[i-1]
                self.m_steers[i] = self.m_steers[i-1]

        # Restore the capture position to wherever it was before predict was called
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_id)
Project: vae-style-transfer    Author: sunsided
def extract_video_frames(queue: PriorityQueue,
                         source: int,
                         cap: cv2.VideoCapture,
                         crop: Tuple[int, int, int, int],
                         target_width: int,
                         target_height: int,
                         frame_step: int = 1,
                         display_progress: bool = False):
    window = 'video'
    if display_progress:
        cv2.namedWindow(window)

    while True:
        success, buffer = cap.read()
        if not success:
            break

        # crop borders (assumes every margin in `crop` is > 0; a zero margin
        # would make the negative slice index produce an empty array)
        buffer = buffer[crop[0]:-crop[2], crop[1]:-crop[3], :]
        buffer = cv2.resize(buffer, (target_width, target_height), interpolation=cv2.INTER_AREA)

        frame = cap.get(cv2.CAP_PROP_POS_FRAMES)

        random_priority = random()
        queue.put((random_priority, (buffer, source)))

        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break

        cap.set(cv2.CAP_PROP_POS_FRAMES, frame + frame_step)

    if display_progress:
        cv2.destroyWindow(window)
Project: SpaceX    Author: shahar603
def skip_to_launch(cap):
    """
    Move cap to the frame before the launch
    :param cap: An OpenCV capture of the launch.
    :return: the index of first frame at T+00:00:00
    """
    initialize(1080)

    left = 0
    right = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1

    cap.set(cv2.CAP_PROP_POS_FRAMES, int((right+left)/2))

    # binary search over frame indices for the boundary where the sign template flips
    while right > left + 1:
        _, frame = cap.read()
        image = crop(frame, rects['sign'])

        if exists(image, sign_template, thresh_dict[frame.shape[0]][1]):
            left = int((right+left)/2)
        else:
            right = int((right+left)/2)

        cap.set(cv2.CAP_PROP_POS_FRAMES, int((right + left) / 2))

    cap.set(cv2.CAP_PROP_POS_FRAMES, left)

    return left
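Combined with skip_from_launch from the first example, a hypothetical session might look like this (launch.mp4 is a placeholder path):

cap = cv2.VideoCapture("launch.mp4")
t0 = skip_to_launch(cap)      # frame index at T+00:00:00
skip_from_launch(cap, -10)    # then rewind to roughly T-10 seconds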
Project: esper    Author: scanner-research
def handle(self, *args, **options):
        with open(options['path']) as f:
            paths = [s.strip() for s in f.readlines()]

        # Only run the detector over videos we haven't yet processed
        filtered = []
        for path in paths:
            video = Video.objects.filter(path=path)
            if len(video) == 0:
                continue
            video = video[0]
            if Face.objects.filter(frame__video=video).count() == 0:
                filtered.append(path)

        # Run the detector via Scanner
        threshold = [0.45, 0.6, 0.7]
        factor = 0.709
        vmargin = 0.2582651235637604
        hmargin = 0.3449094129917718
        out_size = 160
        minsize = 20
        batchsize = 200

        g1 = tf.Graph()
        g1.as_default()
        sess1 = tf.Session(config=tf.ConfigProto(log_device_placement=False))
        sess1.as_default()
        pnet, rnet, onet = align.detect_face.create_mtcnn(sess1, None)

        # Save the results to the database
        for path in filtered:
            video = Video.objects.filter(path=path).get()
            max_frame = video.num_frames
            stride = int(math.ceil(video.fps)/2)

            frames = Frame.objects.filter(video=video)
            frame_map = {}

            for frame in frames:
                frame_map[frame.number] = frame

            batch_images = []
            frame_ids = []
            for frame_id in range(0, max_frame, stride):
                #invid.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
                #retval, img = invid.read()
                #if retval==False:
                #    break
                img = cv2.imread("assets/thumbnails/{}/frame_{}.jpg".format(os.environ['DATASET'], frame_map[frame_id].id))
                assert img is not None
                batch_images.append(img)
                frame_ids.append(frame_id)
                if len(batch_images) == batchsize:
                    self.detect_faces_batch(frame_ids, batch_images, minsize, pnet, rnet, onet, threshold, factor, vmargin, hmargin, video, frame_map)
                    batch_images = []
                    frame_ids = []
                #print frame_id

            if len(frame_ids) > 0:
                self.detect_faces_batch(frame_ids, batch_images, minsize, pnet, rnet, onet, threshold, factor,  vmargin, hmargin, video, frame_map)
Project: self-supervision    Author: gustavla
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    if not cap.isOpened():
        return []
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        flows = []
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None,
                        0.5, # py_scale
                        8,   # levels
                        int(40 * scale_factor),  # winsize
                        10,   # iterations
                        5,  # poly_n
                        1.1, # poly_sigma
                        cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
            #mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            #mags.append(mag)
            flows.append(flow)
            im0 = im1
        flow = (np.mean(flows, 0) / 100).clip(-1, 1)

        #flow = np.mean(flows, 0)
        #flow /= (flow.mean() * 5 + 1e-5)
        #flow = flow.clip(-1, 1)
        #flows = flows / (np.mean(flows, 0, keepdims=True) + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, flow))
    return outputs
Project: AMBR    Author: Algomorph
def main():
    args = parser.parse_args()

    mask = cv2.imread(args.mask_file, cv2.IMREAD_COLOR)

    cap = cv2.VideoCapture(args.in_video)
    last_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1

    if args.end_with == -1:
        args.end_with = last_frame
    else:
        if args.end_with > last_frame:
            print(
                "Warning: specified end frame ({:d}) is beyond the last video frame ({:d}). Stopping after last frame.".format(
                    args.end_with, last_frame))
            args.end_with = last_frame

    if args.out_video == "":
        args.out_video = args.in_video[:-4] + "_masked.mp4"

    writer = cv2.VideoWriter(args.out_video, cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                             cap.get(cv2.CAP_PROP_FPS),
                             (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))), True)
    writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())

    if args.start_from > 0:
        cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_from)

    total_frame_span = args.end_with - args.start_from
    frame_counter = 0
    if args.frame_count == -1:
        cur_frame_number = args.start_from
        while cur_frame_number < args.end_with:
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / total_frame_span
            update_progress(amount_done)
            cur_frame_number += 1
    else:
        frame_interval = max(1, total_frame_span // args.frame_count)  # guard against a zero step
        for i_frame in range(args.start_from, args.end_with, frame_interval):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame)
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / args.frame_count
            update_progress(amount_done)


    cap.release()
    writer.release()
    return 0
Project: calibration    Author: ciechowoj
def open_capture(name, frame):
    capture = cv2.VideoCapture(name)
    width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = capture.get(cv2.CAP_PROP_FPS)

    capture.set(cv2.CAP_PROP_POS_FRAMES, frame)

    print("Opened ", name, ", resolution ", width, "x", height, ", fps ", fps, flush = True)

    return capture