Python cv2 模块,CAP_PROP_FRAME_HEIGHT 实例源码

我们从Python开源项目中,提取了以下42个代码示例,用于说明如何使用cv2.CAP_PROP_FRAME_HEIGHT

项目:motorized_zoom_lens    作者:Kurokesu    | 项目源码 | 文件源码
def grab(cam, queue, width, height, fps):
    """Continuously capture frames from `cam` and push them onto `queue`.

    Runs until the module-level `running` flag is cleared.  Each queue item
    is a dict carrying the image, the current config values and a blur metric.
    """
    global running
    capture = cv2.VideoCapture(cam)
    for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, width),
                        (cv2.CAP_PROP_FRAME_HEIGHT, height),
                        (cv2.CAP_PROP_FPS, fps)):
        capture.set(prop, value)

    while running:
        capture.grab()
        retval, img = capture.retrieve(0)
        frame = {
            "img": img,
            "1": config["1"],
            "2": config["2"],
            "blur": get_blur(img, 0.05),
        }

        # Cap queue depth at 10 so a slow consumer can't grow memory without
        # bound; when full, drop the frame and report the backlog size.
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
项目:cvcalib    作者:Algomorph    | 项目源码 | 文件源码
def __get_video_properties(self):
        """Cache frame geometry, frame count, fps and channel count from self.cap."""
        height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_dims = (height, width)

        self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        # Monochrome sources get a single channel; everything else is assumed BGR.
        self.n_channels = 3 if self.cap.get(cv2.CAP_PROP_MONOCHROME) == 0.0 else 1
        shape = (height, width, self.n_channels)
        self.frame = np.zeros(shape, np.uint8)
        self.previous_frame = np.zeros(shape, np.uint8)
项目:Enchain    作者:Zhehua-Hu    | 项目源码 | 文件源码
def showVideoInfo(video_path):
    """Open `video_path`, print basic stream info and return capture details.

    Returns (vhandle, fps, size, firstframe) on success; returns None (after
    printing a diagnostic) if the video cannot be read or an error occurs.
    """
    try:
        vhandle = cv2.VideoCapture(video_path)  # For read Chinease-name video
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        else:
            print("Video can not read!")
    except Exception as exc:
        # Bug fix: the original except body was a bare string expression,
        # which silently discarded the error. Report it instead.
        print("Error in showVideoInfo: %s" % exc)
项目:motorized_zoom_lens    作者:Kurokesu    | 项目源码 | 文件源码
def grab(cam, queue, width, height, fps):
    """Continuously capture frames from `cam` and push them onto `queue`.

    Runs until the module-level `running` flag is cleared.  Each queue item
    is a dict carrying the image, the current config values and a blur metric.
    """
    global running
    capture = cv2.VideoCapture(cam)
    for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, width),
                        (cv2.CAP_PROP_FRAME_HEIGHT, height),
                        (cv2.CAP_PROP_FPS, fps)):
        capture.set(prop, value)

    while running:
        capture.grab()
        retval, img = capture.retrieve(0)
        frame = {
            "img": img,
            "1": config["1"],
            "2": config["2"],
            "blur": get_blur(img, 0.05),
        }

        # Cap queue depth at 10 so a slow consumer can't grow memory without
        # bound; when full, drop the frame and report the backlog size.
        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
项目:robotics1project    作者:pchorak    | 项目源码 | 文件源码
def initialize(self):
        """Open capture device `self.ID` and configure frame rate, size and threshold."""
        self.cap = cv2.VideoCapture(self.ID)

        frameRate = 20.0
        frameWidth = 640
        frameHeight = 480

        # Property constants moved between OpenCV 2.x (cv2.cv.CV_*) and 3.x+ (cv2.*).
        if cv2.__version__[0] == "2":
            fps_prop = cv2.cv.CV_CAP_PROP_FPS
            width_prop = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
            height_prop = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        else:
            fps_prop = cv2.CAP_PROP_FPS
            width_prop = cv2.CAP_PROP_FRAME_WIDTH
            height_prop = cv2.CAP_PROP_FRAME_HEIGHT
        self.cap.set(fps_prop, frameRate)
        self.cap.set(width_prop, frameWidth)
        self.cap.set(height_prop, frameHeight)

        self.thresh = 0.4
        self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8)
项目:FindYourCandy    作者:BrainPad    | 项目源码 | 文件源码
def capture(self):
        """Grab one sufficiently sharp frame from the camera and return it.

        Makes up to 10 read attempts, sleeping 0.5s after each failed or
        blurry read.  Raises Exception if the device cannot be opened or no
        sharp frame is obtained.
        """
        cam = cv2.VideoCapture(self.device)
        cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)

        if not cam.isOpened():
            raise Exception('Failed to open camera capture.')

        attempts_left = 10
        while attempts_left > 0:
            attempts_left -= 1
            ret, img = cam.read()
            # Accept only a successful read whose blur index clears the threshold.
            if ret and self._blur_index(img) >= self.blur_thres:
                cam.release()
                return img
            time.sleep(0.5)

        cam.release()
        raise Exception('Failed to capture image.')
项目:flight-stone    作者:asmateus    | 项目源码 | 文件源码
def pullData(self):
        """Generator: stream RGB frames from camera #1 as response objects.

        Yields `self.response`, updated in place with each converted frame;
        stops (releasing the capture) when `self.endtr` is set.  Any error
        is printed and tears down the communication channel.
        """
        try:
            if self.pth:
                capture = cv2.VideoCapture(1)
                # NOTE(review): width/height are pulled from device['baudrate'] —
                # presumably (height, width); confirm against the device config.
                capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.device['baudrate'][1])
                capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.device['baudrate'][0])

                while True:
                    if self.endtr:
                        capture.release()
                        return

                    _, frame = capture.read()
                    # OpenCV captures BGR; convert to RGB for consumers.
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                    self.response.assignStatus(RESPONSE_STATUS['OK'])
                    self.response.assignData(frame)

                    yield self.response
        except Exception:
            traceback.print_exc(file=sys.stdout)
            self.endCommunication()
            print('Video ended or interrupted, dropped Buffer')
项目:AMBR    作者:Algomorph    | 项目源码 | 文件源码
def __init__(self, args, main_out_vid_name="foreground"):
        """Set up the base video processor plus a second writer for the mask video.

        If no mask output path was given on the command line, one is derived
        from the input video's name.
        """
        self.mask_writer = None
        super().__init__(args, main_out_vid_name)
        if args.mask_output_video == "":
            # Default: "<input basename>_bs_mask.mp4".
            args.mask_output_video = args.in_video[:-4] + "_bs_mask.mp4"

        # Single-channel (isColor=False) H.264 writer matching the input's fps/size.
        self.mask_writer = cv2.VideoWriter(os.path.join(self.datapath, args.mask_output_video),
                                           cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                                           self.cap.get(cv2.CAP_PROP_FPS),
                                           (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                            int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                                           False)

        # Parallelize encoding across all cores.
        self.mask_writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
        self.foreground_writer = self.writer
        self.foreground = None
        self.mask = None
项目:remho    作者:teamresistance    | 项目源码 | 文件源码
def webcam(camera_id=0):
    """Generator yielding frames from the local webcam `camera_id`.

    The capture is configured to 320x240 for faster processing.  Raises
    CameraInitializationError if the device cannot be opened.  The camera is
    released when the generator is exhausted *or* closed early.
    """
    # TODO update to support Python's 'with' construct
    camera = cv2.VideoCapture()
    camera.open(camera_id)

    # Use a smaller image size for faster processing
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, 320.0)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 240.0)

    if not camera.isOpened():
        raise CameraInitializationError('Camera #{0} failed to open.'.format(camera_id))

    try:
        while camera.isOpened():
            success, frame = camera.read()
            if success:
                yield frame
    finally:
        # Bug fix: release even when the consumer abandons the generator —
        # generator.close()/GC raises GeneratorExit inside the loop, so the
        # original release() line was skipped in that case.
        camera.release()
项目:piwall-cvtools    作者:infinnovation    | 项目源码 | 文件源码
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source described by a flexible spec string.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Returns a cv2.VideoCapture (or a VideoSynthBase-derived synthetic source);
    falls back to `fallback` when the requested source cannot be opened.
    NOTE(review): the default for `fallback` reads the module-level `presets`
    dict at import time.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        # Re-join the drive letter with the path so 'c:/x.avi' stays one chunk.
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    # Numeric strings become camera indices; anything else is a filename/'synth'.
    try: source = int(source)
    except ValueError: pass
    # Remaining ':'-separated chunks are key=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        # Synthetic source: look up the generator class by name.
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass  # fall through to the fallback handling below
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            # Request a capture resolution, e.g. size=640x480.
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', source)
        if fallback is not None:
            # Try the fallback once (passing fallback=None prevents recursion).
            return create_capture(fallback, None)
    return cap
项目:360-stabilizer    作者:MateusZitelli    | 项目源码 | 文件源码
def __init__(self, videoPath, ratio, reprojThresh):
    """Open the video, record its frame size, and seek one sixth of the way in."""
    self.videoPath = videoPath
    self.vidcap = cv2.VideoCapture(videoPath)
    # Total number of frames in the clip (used only to compute the seek point).
    frameTotal = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(self.vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(self.vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    self.videoSize = (width, height)
    # Skip the first sixth of the video before processing starts.
    self.vidcap.set(cv2.CAP_PROP_POS_FRAMES, frameTotal / 6)
    self.ratio = ratio
    self.reprojThresh = reprojThresh
    self.isv3 = imutils.is_cv3()
项目:cv-lane    作者:kendricktan    | 项目源码 | 文件源码
def __init__(self, center=int(cvsettings.CAMERA_WIDTH / 2), debug=False, is_usb_webcam=True, period_s=0.025):
        """Set up the camera stream (PiCamera or USB webcam) and tracking state.

        Args:
            center: horizontal pixel treated as "straight ahead" when computing
                positioning error (default: image midpoint; the default is
                evaluated once, at import time, from cvsettings).
            debug: when True, processed images are displayed.
            is_usb_webcam: use a USB webcam (True) or the Pi camera (False).
            period_s: update interval in seconds (FPS = 1/period_s).
        """
        # Our video stream
        # If its not a usb webcam then get pi camera
        if not is_usb_webcam:
            self.vs = PiVideoStream(resolution=(cvsettings.CAMERA_WIDTH, cvsettings.CAMERA_HEIGHT))
            # Camera cvsettings
            self.vs.camera.shutter_speed = cvsettings.SHUTTER
            self.vs.camera.exposure_mode = cvsettings.EXPOSURE_MODE
            self.vs.camera.exposure_compensation = cvsettings.EXPOSURE_COMPENSATION
            self.vs.camera.awb_gains = cvsettings.AWB_GAINS
            self.vs.camera.awb_mode = cvsettings.AWB_MODE
            self.vs.camera.saturation = cvsettings.SATURATION
            self.vs.camera.rotation = cvsettings.ROTATION
            self.vs.camera.video_stabilization = cvsettings.VIDEO_STABALIZATION
            self.vs.camera.iso = cvsettings.ISO
            self.vs.camera.brightness = cvsettings.BRIGHTNESS
            self.vs.camera.contrast = cvsettings.CONTRAST

        # Else get the usb camera
        else:
            self.vs = WebcamVideoStream(src=0)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, cvsettings.CAMERA_WIDTH)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, cvsettings.CAMERA_HEIGHT)

        # Has camera started
        self.camera_started = False
        self.start_camera()  # Starts our camera

        # To calculate our error in positioning
        self.center = center

        # To determine if we actually detected lane or not
        self.detected_lane = False

        # debug mode on? (to display processed images)
        self.debug = debug

        # Time interval between in update (in ms)
        # FPS = 1/period_s
        self.period_s = period_s

        # Starting time
        self.start_time = time.time()

    # Mouse event handler for get_hsv
项目:cvloop    作者:shoeffner    | 项目源码 | 文件源码
def determine_size(self, capture):
        """Determines the height and width of the image source.

        If no dimensions are available, this method defaults to a resolution of
        640x480, thus returns (480, 640).
        If capture has a get method it is assumed to understand
        `cv2.CAP_PROP_FRAME_WIDTH` and `cv2.CAP_PROP_FRAME_HEIGHT` to get the
        information. Otherwise it reads one frame from the source to determine
        image dimensions.

        Args:
            capture: the source to read from (may be None/falsy).

        Returns:
            A tuple containing integers of height and width (simple casts).
        """
        width = 640
        height = 480
        if capture and hasattr(capture, 'get'):
            width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        elif capture:
            # No get(): sacrifice one frame to inspect its shape.
            self.frame_offset += 1
            ret, frame = capture.read()
            if ret:
                width = frame.shape[1]
                height = frame.shape[0]
        # else: capture is None/falsy — keep the documented 640x480 default.
        # (Bug fix: the original fell into capture.read() here and crashed
        # on a None capture, contradicting its own docstring.)
        return (int(height), int(width))
项目:prototype    作者:chutsu    | 项目源码 | 文件源码
def __init__(self, index=0):
        """Open camera `index` at 640x480 and prepare frame state.

        Bug fix: the original hard-coded device 0 and silently ignored
        `index`; it is now honored (the default of 0 keeps existing callers'
        behavior unchanged).
        """
        self.frame = None
        self.capture = cv2.VideoCapture(index)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        # self.capture.set(cv2.CAP_PROP_EXPOSURE, 0)
        # self.capture.set(cv2.CAP_PROP_GAIN, 0)

        # Define codec and create VideoWriter object
        # fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        # self.out = cv2.VideoWriter('output.avi', fourcc, 120.0, (640, 480))
项目:learning-blind-motion-deblurring    作者:cgtuebingen    | 项目源码 | 文件源码
def height(self):
        """Frame height of the underlying video, as an int."""
        pixels = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        return int(pixels)
项目:learning-blind-motion-deblurring    作者:cgtuebingen    | 项目源码 | 文件源码
def shape(self):
        """Return the frame dimensions of the video as an (height, width) int tuple."""
        height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        return (height, width)
项目:Drowsiness-Detection    作者:MohamedSlama    | 项目源码 | 文件源码
def Camera():
    """Return a cv2.VideoCapture for the camera index given on argv (default 0)."""
    # First CLI argument (if any) selects the camera device.
    if len(sys.argv) > 1:
        cam_index = sys.argv[1]
    else:
        print("This's The Main Camera")
        cam_index = 0

    # Resize Capture
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 300);
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 300);

    return cv2.VideoCapture(int(cam_index))
项目:emojivis    作者:JustinShenk    | 项目源码 | 文件源码
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source described by a flexible spec string.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Returns a cv2.VideoCapture (or a VideoSynthBase-derived synthetic source);
    falls back to `fallback` when the requested source cannot be opened.
    NOTE(review): the default for `fallback` reads the module-level `presets`
    dict at import time.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        # Re-join the drive letter with the path so 'c:/x.avi' stays one chunk.
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    # Numeric strings become camera indices; anything else is a filename/'synth'.
    try: source = int(source)
    except ValueError: pass
    # Remaining ':'-separated chunks are key=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        # Synthetic source: look up the generator class by name.
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass  # fall through to the fallback handling below
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            # Request a capture resolution, e.g. size=640x480.
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', source)
        if fallback is not None:
            # Try the fallback once (passing fallback=None prevents recursion).
            return create_capture(fallback, None)
    return cap
项目:ATLeS    作者:liffiton    | 项目源码 | 文件源码
def height(self):
        """Height in pixels of the wrapped video's frames."""
        raw = self._video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        return int(raw)
项目:Smart-Car    作者:jimchenhub    | 项目源码 | 文件源码
def initCap():
    """Open camera 1 at the resolution configured in common_config and return it."""
    cap = cv2.VideoCapture(1)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, common_config.CAP_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, common_config.CAP_HEIGHT)
    return cap
# NOTE: original (Chinese) comment was garbled in extraction; it appears to
# have described the captured image handling below.
项目:Smart-Car    作者:jimchenhub    | 项目源码 | 文件源码
def init(host='lenovo-pc', port=1234, capL_id=2, capR_id=1):
    """Connect to the remote host and start both stereo frame-grabber threads.

    Opens a TCP socket to (host, port), opens the left/right cameras at the
    BINCAP_W x BINCAP_H resolution, and launches one grabber thread per eye.
    Results are shared through the module-level globals declared below.
    """
    global capL, capR, ltr, rtr, sock
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    capL = cv2.VideoCapture(capL_id)
    capL.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capL.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    capR = cv2.VideoCapture(capR_id)
    capR.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capR.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    ltr = GetFrameLThread()
    rtr = GetFrameRThread()
    ltr.start()
    rtr.start()
项目:seq2seq_temporal_attention    作者:aistairc    | 项目源码 | 文件源码
def extract_frames(path, stride=1):
    """Decode a video into a uint8 array of shape (n_frames, H, W, 3).

    Every `stride`-th frame is kept; the frames in between are read and
    discarded.  Exits the process if the video cannot be opened.
    """
    print(path)
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        print("Error: Failed to open %s" % path)
        sys.exit(-1)

    # Property constants moved from cv2.cv.CV_* (OpenCV 2.x) to cv2.* (3.x+).
    try:
        FRAME_COUNT = cv2.CAP_PROP_FRAME_COUNT
        FRAME_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
        FRAME_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
    except AttributeError:
        FRAME_COUNT = cv2.cv.CV_CAP_PROP_FRAME_COUNT
        FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH

    number_of_frames = int(cap.get(FRAME_COUNT))
    length2 = number_of_frames // stride
    height = int(cap.get(FRAME_HEIGHT))
    width = int(cap.get(FRAME_WIDTH))
    frames = np.zeros((length2, height, width, 3), dtype=np.uint8)
    for frame_i in range(length2):  # bug fix: xrange is Python 2-only
        ret, image = cap.read()
        if not ret:
            # Container metadata overestimated the frame count; truncate to
            # what was actually decodable instead of crashing on a None frame.
            frames = frames[:frame_i]
            break
        frames[frame_i] = image[:, :, :]
        for _ in range(1, stride):
            cap.read()  # skip the intermediate frames
    print(len(frames))
    return frames
项目:ISC-bot    作者:SuryaThiru    | 项目源码 | 文件源码
def __init__(self):
        """Open the default camera and request an 800x600 capture size."""
        self.cam = cv2.VideoCapture(0)

        self.w = 800
        self.h = 600

        # Bug fix: the original swapped the properties, setting FRAME_HEIGHT
        # to the width (800) and FRAME_WIDTH to the height (600).
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.w)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.h)
def _writevideoframe(self):
        """Append the current frame to the output video, creating the writer lazily.

        Writer creation is deferred until the FPS is known: if the capture
        reports 0.0, wait ~20 elapsed frames and use the running FPS estimate.
        """
        if not self.is_writingvideo:
            return
        if self._videoWriter is None:
            fps = self._capture.get(cv2.CAP_PROP_FPS)
            if fps == 0.0:
                # Capture does not report an FPS; fall back to our estimate.
                if self._frameElapsed < 20:
                    # wait until more frame elapse so that the estimate is more stable.
                    return
                else:
                    fps = self._fpsEstimate
                    # print fps
            size = (int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            self._videoWriter = cv2.VideoWriter(self._videoFilename, self._videoEncoding, fps, size)

        self._videoWriter.write(self._frame)
项目:OpenCV-Snapchat-DogFilter    作者:sguduguntla    | 项目源码 | 文件源码
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source described by a flexible spec string.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Returns a cv2.VideoCapture (or a VideoSynthBase-derived synthetic source);
    falls back to `fallback` when the requested source cannot be opened.
    NOTE(review): the default for `fallback` reads the module-level `presets`
    dict at import time.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        # Re-join the drive letter with the path so 'c:/x.avi' stays one chunk.
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    # Numeric strings become camera indices; anything else is a filename/'synth'.
    try: source = int(source)
    except ValueError: pass
    # Remaining ':'-separated chunks are key=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        # Synthetic source: look up the generator class by name.
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass  # fall through to the fallback handling below
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            # Request a capture resolution, e.g. size=640x480.
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', source)
        if fallback is not None:
            # Try the fallback once (passing fallback=None prevents recursion).
            return create_capture(fallback, None)
    return cap
项目:Cerebrum    作者:tyler-cromwell    | 项目源码 | 文件源码
def __init__(self, source, config):
        """Open capture `source` and apply the width/height from the camera config."""
        camera = config.camera()
        width = int(camera['width'])
        height = int(camera['height'])
        self.__source = source
        self.__camera = cv2.VideoCapture(source)
        self.__width = width
        self.__height = height
        self.__camera.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.__camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
项目:memegenerator    作者:Huxwell    | 项目源码 | 文件源码
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source described by a flexible spec string.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Returns a cv2.VideoCapture (or a VideoSynthBase-derived synthetic source);
    falls back to `fallback` when the requested source cannot be opened.
    NOTE(review): the default for `fallback` reads the module-level `presets`
    dict at import time.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        # Re-join the drive letter with the path so 'c:/x.avi' stays one chunk.
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    # Numeric strings become camera indices; anything else is a filename/'synth'.
    try: source = int(source)
    except ValueError: pass
    # Remaining ':'-separated chunks are key=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        # Synthetic source: look up the generator class by name.
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass  # fall through to the fallback handling below
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            # Request a capture resolution, e.g. size=640x480.
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', source)
        if fallback is not None:
            # Try the fallback once (passing fallback=None prevents recursion).
            return create_capture(fallback, None)
    return cap
项目:remho    作者:teamresistance    | 项目源码 | 文件源码
def ip_camera(url):
    """Generator yielding frames from an IP camera stream at `url`.

    Relies on cv2.VideoCapture's built-in network-stream support; the
    commented-out block below is an alternative manual MJPEG decoder.
    Raises CameraInitializationError if the stream cannot be opened.
    """
    warnings.warn('Untested.', RuntimeWarning)

    # Alternate method, should work better than below if it works
    camera = cv2.VideoCapture(url)

    # Unnecessary if resizing through firmware
    # camera.set(cv2.CAP_PROP_FRAME_WIDTH, 320.0)
    # camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 240.0)

    if not camera.isOpened():
        raise CameraInitializationError('Failed to open camera at {}'.format(url))

    while camera.isOpened():
        success, frame = camera.read()
        if success:
            yield frame

    camera.release()

    # mjpeg stream decoding: http://stackoverflow.com/a/21844162/5623874

    # stream = request.urlopen(url)
    # bytes = b''
    # while True:
    #     bytes += stream.read(1024)
    #     a = bytes.find(b'\xff\xd8')
    #     b = bytes.find(b'\xff\xd9')
    #     if a != -1 and b != -1:
    #         jpg = bytes[a:b + 2]
    #         bytes = bytes[b + 2:]
    #         image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
    #         yield image


# TODO documentation
项目:Image-Processing-and-Feature-Detection    作者:amita-kapoor    | 项目源码 | 文件源码
def create_capture(source = 0, fallback = presets['chess']):
    '''Open a video source described by a flexible spec string.

    source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'

    Returns a cv2.VideoCapture (or a VideoSynthBase-derived synthetic source);
    falls back to `fallback` when the requested source cannot be opened.
    NOTE(review): the default for `fallback` reads the module-level `presets`
    dict at import time.
    '''
    source = str(source).strip()
    chunks = source.split(':')
    # handle drive letter ('c:', ...)
    if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
        # Re-join the drive letter with the path so 'c:/x.avi' stays one chunk.
        chunks[1] = chunks[0] + ':' + chunks[1]
        del chunks[0]

    source = chunks[0]
    # Numeric strings become camera indices; anything else is a filename/'synth'.
    try: source = int(source)
    except ValueError: pass
    # Remaining ':'-separated chunks are key=value parameters.
    params = dict( s.split('=') for s in chunks[1:] )

    cap = None
    if source == 'synth':
        # Synthetic source: look up the generator class by name.
        Class = classes.get(params.get('class', None), VideoSynthBase)
        try: cap = Class(**params)
        except: pass  # fall through to the fallback handling below
    else:
        cap = cv2.VideoCapture(source)
        if 'size' in params:
            # Request a capture resolution, e.g. size=640x480.
            w, h = map(int, params['size'].split('x'))
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    if cap is None or not cap.isOpened():
        print('Warning: unable to open video source: ', source)
        if fallback is not None:
            # Try the fallback once (passing fallback=None prevents recursion).
            return create_capture(fallback, None)
    return cap
项目:real_time_face_detection    作者:Snowapril    | 项目源码 | 文件源码
def main(parser):
    """Run face/feature detection on the capture source, optionally recording.

    Reads `source`, `record`, `output_path`, `width` and `height` from
    `parser`.  Shows annotated frames until the stream ends or 'q' is pressed.
    """
    capture = cv2.VideoCapture(parser.source)
    src_width, src_height = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

    if parser.record == True:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(parser.output_path, fourcc, 20.0, (src_width, src_height))

    cascPath = "./haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    while True:
        ret, frame = capture.read()
        if not ret:
            # Bug fix: the original passed a None frame to cvtColor when the
            # stream ended or the camera failed, crashing instead of exiting.
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        pred_features = detect_features(gray, faces, src_width, src_height, parser.width, parser.height)
        result_img = draw_features_point_on_image(frame, pred_features, src_width, src_height)

        # Outline every detected face.
        for (x, y, w, h) in faces:
            cv2.rectangle(result_img, (x, y), (x+w, y+h), (0, 255, 0), 1)

        if parser.record == True:  # `ret` is guaranteed True here
            out.write(result_img)

        cv2.imshow('Video', result_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()

    if parser.record == True:
        out.release()

    cv2.destroyAllWindows()
项目:StormCV2017    作者:2729StormRobotics    | 项目源码 | 文件源码
def main():
    """Vision pipeline entry point: grab frames, process, publish to NetworkTables.

    Publishes estimated target distance per frame, plus rolling FPS and
    average area (every 200 frames) to the 'Vision' table.
    """
    logging.basicConfig(level=logging.DEBUG)
    print('Initializing NetworkTables')
    # NetworkTables.setTeam(2729)
    # NetworkTables.setClientMode()
    # NetworkTables.setIPAddress('10.27.29.202')
    NetworkTables.initialize(server='roboRIO-2729-frc.local')

    print('Creating pipeline')
    pipeline = Retrotape()

    print('Creating video capture')
    # stream = cv2
    #cap = cv2.VideoCapture("http://localhost:1181/?action=stream")
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture(http://storm-rpi1.local:1181/?action=stream)
    print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # NOTE(review): low exposure/brightness presumably so the retroreflective
    # tape dominates the image — confirm against the pipeline's assumptions.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    cap.set(cv2.CAP_PROP_EXPOSURE, 0)
    cap.set(cv2.CAP_PROP_BRIGHTNESS, 30)


    print('Running pipeline')
    iteration = 0
    total = 0
    curr_time = datetime.now()
    while cap.isOpened():
        have_frame, frame = cap.read()
        if have_frame:
            pipeline.process(frame)
            currArea = extra_processing(pipeline)
            total += currArea
            iteration += 1
            # print(iteration)
            # print(total)
            table = NetworkTables.getTable('Vision')
            # ***EQUATION DISTANCE VS AREA*** 53111e^(-1.702x)
            # ***Inverse*** ln(A/53111)/-1.702 = d
            # ***Inverse Test2 -1.0142ln(.0000578938x)
            # Every 200 frames: publish rolling FPS and average area, then reset.
            if(iteration % 200 == 0):
                table.putNumber('FPS', 200 / (datetime.now() - curr_time).total_seconds())
                curr_time = datetime.now()
                # table = NetworkTables.getTable('Vision')
                table.putNumber('Average Area', total / 200)
                print(total / 200)
                iteration = 0
                total = 0
            scaling = 6.8
            estDistance = distanceEstimate(currArea * scaling)
            table.putNumber('est_distance', estDistance)
    print('Capture closed')
项目:ATLeS    作者:liffiton    | 项目源码 | 文件源码
def _cam_setup(self, source, w, h, fps, exposure):
        """Open and fully configure a camera; return the capture or None on failure.

        Frame size is set via OpenCV; fps, exposure and white balance are
        forced through v4l2-ctl because OpenCV cannot control them here.
        The capture's release is registered with atexit.
        """
        cap = cv2.VideoCapture(source)
        if not cap.isOpened():
            return None

        atexit.register(cap.release)

        # Set width/height (note: this fails in OpenCV 2.4.x but works with 3.2)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)

        # Read a frame, just in case it's needed before setting params
        rval, _ = cap.read()
        if not rval:
            cap.release()
            return None

        # Hacks using v4l2-ctl to set capture parameters we can't control through OpenCV
        v4l2args = []

        # Change AWB setting once to make sure new settings are actually used
        # Not sure why this is required, but it appears to work.
        # Without this, white balance is still somehow automatic even
        # after setting it to manual below.
        self._v4l2_call("--set-ctrl=white_balance_auto_preset=1")

        # Set FPS (OpenCV requests 30, hardcoded)
        v4l2args.append("-p %d" % fps)

        # Set exposure (shutter speed/ISO)
        # exposure_time_absolute is given in multiples of 0.1ms.
        # Make sure fps above is not set too high (exposure time
        # will be adjusted automatically to allow higher frame rate)
        v4l2args.append("--set-ctrl=auto_exposure=1")  # 0=auto, 1=manual
        v4l2args.append("--set-ctrl=exposure_time_absolute=%d" % exposure)

        v4l2args.append("--set-ctrl=white_balance_auto_preset=0")
        v4l2args.append("--set-ctrl=red_balance=1000")
        v4l2args.append("--set-ctrl=blue_balance=1000")

        self._v4l2_call(" ".join(v4l2args))

        logging.info("Set exposure via v4l2-ctl.  Capturing/dumping frames so settings take effect before tracking starts.")
        for _ in range(5):
            cap.read()

        return cap
项目:Smart-Car    作者:jimchenhub    | 项目源码 | 文件源码
def run():
    """Stereo-vision obstacle avoidance loop: compute disparity, steer, repeat.

    Shows the disparity image continuously; quits on 'q'.
    NOTE(review): orientation codes inferred from the branch labels —
    2 = clear ahead, 0 = blocked (reverse), 1 = obstacle (turn).
    """
    capL = cv2.VideoCapture(1)
    capL.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capL.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    capR = cv2.VideoCapture(0)
    capR.set(cv2.CAP_PROP_FRAME_WIDTH, BINCAP_W)
    capR.set(cv2.CAP_PROP_FRAME_HEIGHT, BINCAP_H)
    while True:
        disparity = util.getDisparity(capL.read(), capR.read())
        cv2.imshow('disparity', disparity)
        orient = util.getOriention(disparity)
        if orient == 2:
            print('forward')
            continue

        while orient == 0:
            print('backward')
            disparity = util.getDisparity(capL.read(), capR.read())
            cv2.imshow('disparity', disparity)
            orient = util.getOriention(disparity)

        while orient == 1:
            turn = turnTo(disparity)
            # turn left
            if turn == 3:
                while orient != 2:
                    print('turn left')
                    disparity = util.getDisparity(capL.read(), capR.read())
                    cv2.imshow('disparity', disparity)
                    orient = util.getOriention(disparity)
            # turn right
            elif turn == 4:
                while orient != 2:
                    print('turn right')
                    disparity = util.getDisparity(capL.read(), capR.read())
                    cv2.imshow('disparity', disparity)
                    orient = util.getOriention(disparity)
        # end
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Bug fix: the original released undefined names cap0/cap1 (NameError);
    # the captures opened above are capL/capR.
    capL.release()
    capR.release()
    cv2.destroyAllWindows()
项目:AMBR    作者:Algomorph    | 项目源码 | 文件源码
def main():
    """Apply a color mask to a frame range of a video and write the result.

    Reads CLI args from the module-level `parser`: in_video, mask_file,
    out_video, start_from, end_with, frame_count.  Returns 0 on success.
    """
    args = parser.parse_args()

    mask = cv2.imread(args.mask_file, cv2.IMREAD_COLOR)

    cap = cv2.VideoCapture(args.in_video)
    # Bug fix: CAP_PROP_FRAME_COUNT is a float; cast so frame indices stay
    # ints (range() below rejects float bounds).
    last_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1

    # Clamp the requested end frame to the actual length of the video.
    if args.end_with == -1:
        args.end_with = last_frame
    elif args.end_with > last_frame:
        print(
            "Warning: specified end frame ({:d}) is beyond the last video frame ({:d}). Stopping after last frame.".format(
                args.end_with, last_frame))
        args.end_with = last_frame

    if args.out_video == "":
        args.out_video = args.in_video[:-4] + "_masked.mp4"

    writer = cv2.VideoWriter(args.out_video, cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                             cap.get(cv2.CAP_PROP_FPS),
                             (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))), True)
    writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())

    if args.start_from > 0:
        cap.set(cv2.CAP_PROP_POS_FRAMES, args.start_from)

    total_frame_span = args.end_with - args.start_from
    frame_counter = 0
    if args.frame_count == -1:
        # Process every frame in the range.
        cur_frame_number = args.start_from
        while cur_frame_number < args.end_with:
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / total_frame_span
            update_progress(amount_done)
            cur_frame_number += 1
    else:
        # Process only `frame_count` evenly spaced frames.  Bug fix: clamp the
        # step to >= 1 — the original raised ValueError (range step 0) whenever
        # frame_count exceeded the frame span.
        frame_interval = max(1, total_frame_span // args.frame_count)
        for i_frame in range(args.start_from, args.end_with, frame_interval):
            cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame)
            process_frame(cap, writer, mask)
            frame_counter += 1
            amount_done = frame_counter / args.frame_count
            update_progress(amount_done)


    cap.release()
    writer.release()
    return 0
项目:AMBR    作者:Algomorph    | 项目源码 | 文件源码
def __init__(self, args, out_postfix="_out", with_video_output=True):
        """Prepare video processing: open the input capture and, optionally,
        an X264 output writer.

        :param args: parsed CLI namespace; every attribute on it is copied
                     onto this instance (in_video, start_from, end_with,
                     out_video, ...).
        :param out_postfix: postfix appended to the input file name when no
                            explicit output video name was given.
        :param with_video_output: when False, no VideoWriter is created and
                                  ``self.writer`` stays None.
        """
        self.global_video_offset = 0
        self.flip_video = False
        self.datapath = "./"
        # Expose every CLI option directly as an instance attribute.
        self.__dict__.update(vars(args))
        self.writer = None

        if os.path.exists("settings.yaml"):
            # Context manager instead of manual open/close so the file is
            # released even if YAML parsing raises.
            with open("settings.yaml", mode='r') as stream:
                self.settings = load(stream, Loader=Loader)
            self.datapath = self.settings['datapath'].replace("<current_user>", getuser())
            print("Processing path: ", self.datapath)
            if 'raw_options' in self.settings:
                raw_options = self.settings['raw_options']
                # self.in_video == args.in_video (copied above); use the
                # instance attribute consistently for both test and lookup
                # instead of mixing it with args.in_video.
                if self.in_video in raw_options:
                    self.global_video_offset = raw_options[self.in_video]['global_offset']
                    self.flip_video = raw_options[self.in_video]['flip']

        self.cap = None
        self.reload_video()
        print("Processing video file {:s}.".format(self.in_video))

        last_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1)

        if self.end_with == -1:
            self.end_with = last_frame
        elif self.end_with > last_frame:
            print(("Warning: specified end frame ({:d}) is beyond the last video frame" +
                   " ({:d}). Stopping after last frame.")
                  .format(self.end_with, last_frame))
            self.end_with = last_frame

        print("Frame range: {:d}--{:d}".format(self.start_from, self.end_with))

        if with_video_output:
            if self.out_video == "":
                # NOTE(review): with the default out_postfix="_out" this
                # yields a double underscore ("name__out.mp4") — confirm
                # that is intended before changing it.
                self.out_video = self.in_video[:-4] + "_" + out_postfix + ".mp4"

            self.writer = cv2.VideoWriter(os.path.join(self.datapath, self.out_video),
                                          cv2.VideoWriter_fourcc('X', '2', '6', '4'),
                                          self.cap.get(cv2.CAP_PROP_FPS),
                                          (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                           int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                                          True)
            self.writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count())
        # else: self.writer stays None (initialized above).

        self.frame = None
        self.cur_frame_number = None
项目:news-shot-classification    作者:gshruti95    | 项目源码 | 文件源码
def my_detect_scenes_file(path, scene_list, detector_list, stats_writer=None,
                          downscale_factor=0, frame_skip=0, quiet_mode=False,
                          perf_update_rate=-1, save_images=False,
                          timecode_list=None):
    """Open the video at *path* and run PySceneDetect scene detection on it.

    :return: tuple ``(video_fps, frames_read)``; both are -1 when the video
             cannot be opened.
    """
    video_fps, frames_read = -1, -1
    if not timecode_list:
        timecode_list = [0, 0, 0]

    cap = cv2.VideoCapture()
    cap.open(path)
    file_name = path
    if not cap.isOpened():
        if not quiet_mode:
            print('[PySceneDetect] FATAL ERROR - could not open video %s.' %
                  path)
        return (video_fps, frames_read)
    if not quiet_mode:
        print('[PySceneDetect] Parsing video %s...' % file_name)

    video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    if not quiet_mode:
        print('[PySceneDetect] Video Resolution / Framerate: %d x %d / %2.3f FPS' % (
            video_width, video_height, video_fps))
        if downscale_factor >= 2:
            print('[PySceneDetect] Subsampling Enabled (%dx, Resolution = %d x %d)' % (
                downscale_factor, video_width / downscale_factor, video_height / downscale_factor))
        print('Verify that the above parameters are correct'
              ' (especially framerate, use --force-fps to correct if required).')

    def _to_frame(tc):
        # Timecodes may be frame numbers (int), seconds (float), or
        # [hh, mm, ss] lists; anything else maps to frame 0.
        if isinstance(tc, int):
            return tc
        if isinstance(tc, float):
            return int(tc * video_fps)
        if isinstance(tc, list) and len(tc) == 3:
            secs = float(tc[0] * 60 * 60) + float(tc[1] * 60) + float(tc[2])
            return int(secs * video_fps)
        return 0

    frames_list = [_to_frame(tc) for tc in timecode_list]

    start_frame, end_frame, duration_frames = 0, 0, 0
    if len(frames_list) == 3:
        start_frame, end_frame, duration_frames = frames_list

    frames_read = scenedetect.detect_scenes(cap, scene_list, detector_list, stats_writer,
                                            downscale_factor, frame_skip, quiet_mode,
                                            perf_update_rate, save_images, file_name,
                                            start_frame, end_frame, duration_frames)

    cap.release()
    return (video_fps, frames_read)
项目:calibration    作者:ciechowoj    | 项目源码 | 文件源码
def open_capture(name, frame):
    """Open *name* as a cv2.VideoCapture, seek to *frame*, and return it.

    Logs the capture's resolution and frame rate as reported by the backend.
    """
    capture = cv2.VideoCapture(name)

    width, height, fps = (capture.get(prop) for prop in
                          (cv2.CAP_PROP_FRAME_WIDTH,
                           cv2.CAP_PROP_FRAME_HEIGHT,
                           cv2.CAP_PROP_FPS))

    capture.set(cv2.CAP_PROP_POS_FRAMES, frame)

    print("Opened ", name, ", resolution ", width, "x", height, ", fps ", fps, flush=True)

    return capture
项目:DVR-Scan    作者:Breakthrough    | 项目源码 | 文件源码
def _load_input_videos(self):
        """Open and validate every input video in ``self.video_paths``.

        All clips must be readable and share the first clip's resolution
        (frame rate is assumed equal when the resolution matches, since
        VideoCapture FPS information is not always accurate). Populates
        ``self.video_resolution``, ``self.video_fps`` and ``self.frames_total``.

        :return: True when every clip checks out, False otherwise.
        """
        self.video_resolution = None
        self.video_fps = None
        self.frames_total = 0
        if not self.video_paths:
            return False
        for path in self.video_paths:
            name = os.path.basename(path)
            reader = cv2.VideoCapture()
            reader.open(path)
            if not reader.isOpened():
                if not self.suppress_output:
                    print("[DVR-Scan] Error: Couldn't load video %s." % name)
                    print("[DVR-Scan] Check that the given file is a valid video"
                          " clip, and ensure all required software dependencies"
                          " are installed and configured properly.")
                reader.release()
                return False
            resolution = (int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
                          int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            framerate = reader.get(cv2.CAP_PROP_FPS)
            self.frames_total += reader.get(cv2.CAP_PROP_FRAME_COUNT)
            reader.release()
            if self.video_resolution is None and self.video_fps is None:
                # First clip defines the parameters all others must match.
                self.video_resolution = resolution
                self.video_fps = framerate
                if not self.suppress_output:
                    print("[DVR-Scan] Opened video %s (%d x %d at %2.3f FPS)." % (
                        name, self.video_resolution[0],
                        self.video_resolution[1], self.video_fps))
            elif resolution != self.video_resolution:
                if not self.suppress_output:
                    print("[DVR-Scan] Error: Can't append clip %s, video resolution"
                          " does not match the first input file." % name)
                return False
            elif not self.suppress_output:
                print("[DVR-Scan] Appended video %s." % name)
        # Reaching this point means every clip shares the same parameters.
        return True
项目:nnp    作者:dribnet    | 项目源码 | 文件源码
def setup_camera(device_number):
    """Open capture device *device_number* throttled to 1 FPS.

    Frame width/height are left at the driver defaults (the overrides
    were disabled in this variant).
    """
    cam = cv2.VideoCapture(device_number)
    cam.set(cv2.CAP_PROP_FPS, 1)
    return cam
项目:Enchain    作者:Zhehua-Hu    | 项目源码 | 文件源码
def videoSlice(video_path, save_path, progressbarsetter=None, save_type="png", img_comp=0, start_idx=1):
    """Slice a video into sequentially numbered image files.

    :param video_path: path of the source video (may contain a
                       non-ASCII/Chinese file name)
    :param save_path: directory that receives the frames; file names are
                      zero-padded indices, e.g. ``000001.png``
    :param progressbarsetter: optional callable receiving progress in percent
    :param save_type: "png" or "jpg"/"jpeg" (case-insensitive)
    :param img_comp: compression level: png [0-9], jpg [0-100];
                     higher numbers increase the compression level
    :param start_idx: index used for the first written frame
    :return: total frame count as reported by CAP_PROP_FRAME_COUNT
    :raises ValueError: if *save_type* is not a supported format
    """
    # For reading Chinese-named video files.
    vid_handle = cv2.VideoCapture(video_path)
    count = vid_handle.get(cv2.CAP_PROP_FRAME_COUNT)

    idx = start_idx  # output numbering starts here (e.g. 000001.xxx)
    cnt_idx = 1      # processed-frame counter, drives the progress value

    fmt = save_type.upper()
    if fmt in ("JPEG", "JPG"):
        suffix = ".jpg"
        params = [int(cv2.IMWRITE_JPEG_OPTIMIZE), img_comp]
    elif fmt == "PNG":
        suffix = ".png"
        params = [int(cv2.IMWRITE_PNG_COMPRESSION), img_comp]
    else:
        # Previously this only printed a message and fell through with
        # suffix=None, crashing later with a TypeError when building the
        # file name; fail fast instead.
        raise ValueError("Do not support %s format!" % save_type)

    while True:
        ret, frame = vid_handle.read()
        if not ret:
            break
        cur_progress = cnt_idx / (count / 100.0)
        if progressbarsetter is not None:
            progressbarsetter(cur_progress)
        print("Progress %.2f%%" % cur_progress)
        img_name = save_path + "/" + ("%06d" % idx) + suffix
        # (A leftover Python-2 `print params` debug statement was removed
        # here: it is a SyntaxError under Python 3, which this file targets.)
        cv2.imwrite(img_name, frame, params)
        idx += 1
        cnt_idx += 1
    print("Slicing Done!")
    return count
项目:nnp    作者:dribnet    | 项目源码 | 文件源码
def setup_camera():
    """Open the default camera (index 0) configured for 720x512 at 1 FPS."""
    cam = cv2.VideoCapture(0)
    for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, 720),
                        (cv2.CAP_PROP_FRAME_HEIGHT, 512),
                        (cv2.CAP_PROP_FPS, 1)):
        cam.set(prop, value)
    return cam
项目:nnp    作者:dribnet    | 项目源码 | 文件源码
def setup_camera(device_number):
    """Open capture device *device_number* at cam_width x cam_height, 1 FPS.

    ``cam_width`` and ``cam_height`` are module-level configuration globals.
    """
    cam = cv2.VideoCapture(device_number)
    for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, cam_width),
                        (cv2.CAP_PROP_FRAME_HEIGHT, cam_height),
                        (cv2.CAP_PROP_FPS, 1)):
        cam.set(prop, value)
    return cam