Python cv2 module: VideoCapture() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.VideoCapture().
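
Before the project examples, here is a minimal sketch of the typical cv2.VideoCapture() workflow: open a source, check isOpened(), read frames in a loop, and release the handle when done. The device index 0, the window name and the 'q' quit key are illustrative assumptions, not taken from any of the projects below.

import cv2

# Open the default camera; a video file path or stream URL also works here.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise IOError("Cannot open video source")

while True:
    ret, frame = cap.read()  # ret becomes False once no more frames can be read
    if not ret:
        break
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()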

Project: motorized_zoom_lens    Author: Kurokesu
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)

    while(running):
        frame = {}
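        # grab() followed by retrieve() splits read() into two steps; grab() is cheap, retrieve() decodes the frame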
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]

        blur = get_blur(img, 0.05)
        frame["blur"] = blur

        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
Project: CIKM2017    Author: MovieFIB
def load_videos(video_file):
    # print "load_videos"
    capture = cv2.VideoCapture(video_file)

    read_flag, frame = capture.read()
    vid_frames = []
    i = 1
    # print read_flag

    while (read_flag):
        # print i
        if i % 10 == 0:
            vid_frames.append(frame)
            #                print frame.shape
        read_flag, frame = capture.read()
        i += 1
    vid_frames = np.asarray(vid_frames, dtype='uint8')[:-1]
    # print 'vid shape'
    # print vid_frames.shape
    capture.release()
    print i
    return vid_frames
Project: Mini-Projects    Author: gaborvecsei
def CaptureImage():
    imageName = 'DontCare.jpg' #Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # To capture the image in monochrome
        rgbImage = frame # To capture the image in color (OpenCV frames are BGR)

        # Display the resulting frame
        cv2.imshow('Webcam',rgbImage)
        #Wait to press 'q' key for capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            #Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            #Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    #Returns the captured image's name
    return imageName
Project: nnp    Author: dribnet
def do_key_press(symbol, modifiers):
    global cur_vector
    print("SO: {}".format(symbol))
    if(symbol == key.R):
        if theApp.use_camera:
            theApp.set_camera_recording(not theApp.camera_recording)
    if(symbol == key.T):
        theApp.show_camera = not theApp.show_camera
    elif(symbol == key.SPACE):
        print("SPACEBAR")
        snapshot(None);
    elif(symbol == key.ESCAPE):
        print("ESCAPE")
        cv2.destroyAllWindows()
        if theApp.use_camera:
            cv2.VideoCapture(0).release()
        sys.exit(0)
Project: Enchain    Author: Zhehua-Hu
def showVideoInfo(video_path):
    try:
        vhandle = cv2.VideoCapture(video_path)  # For reading videos with Chinese filenames
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        else:
            print("Video can not read!")
    except:
        "Error in showVideoInfo"
Project: FaceSwap    Author: Aravind-Suresh
def videoize(func, args, src = 0, win_name = "Cam", delim_wait = 1, delim_key = 27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; Almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1])/2, (s_h - out.shape[0])/2)
        k = cv2.waitKey(delim_wait)

        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
Project: Motion-Sensor    Author: Paco1994
def video (seconds, frameRate):
    cap = cv2.VideoCapture(0)
    if(not cap.isOpened()):
        return "error"

    # Define the codec and create VideoWriter object
    fourcc = cv2.cv.CV_FOURCC(*'XVID')
    name = "media/video/" + time.strftime("%d-%m-%Y_%X")+".avi"
    out = cv2.VideoWriter(name, fourcc, frameRate, (640,480))
    program_starts = time.time()
    result = subprocess.Popen(["ffprobe", name], stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True)
    nFrames=0
    while(nFrames<seconds*frameRate):
        ret, frame = cap.read()
        if ret==True:
            out.write(frame)
            nFrames += 1
        else:
            break
    cap.release()
    return name
Project: PyIntroduction    Author: tody411
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if ret == False:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Display the webcam capture with Matplotlib
Project: lan-ichat    Author: Forec
def __init__(self ,ip, port, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        if level <= 3:
            self.interval = level
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0)
Project: lan-ichat    Author: Forec
def __init__(self ,ip, port, showme, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        self.showme = showme
        if level == 0:
            self.interval = 0
        elif level == 1:
            self.interval = 1
        elif level == 2:
            self.interval = 2
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0)
        print("VEDIO client starts...")
Project: dvd    Author: ajayrfhp
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            if output[2][i][4] >= min_thresh and output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2], output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
    cap.release()
    cv2.destroyAllWindows()
Project: pycreate2    Author: MomsFriendlyRobotCompany
def write():
    os.remove(filename)
    cap = cv2.VideoCapture(0)
    db = shelve.open(filename)
    imgs = []
    data = range(100)

    for i in range(100):
        ret, frame = cap.read()

        if ret:
            # jpg = frame  # 29 MB
            # jpg = cv2.imencode('.jpg', frame)  # make much smaller (1.9MB), otherwise 29MB
            jpg = cv2.imencode('.jpg', frame)[1].tostring()  # no benefit from converting to a string (1.9MB)
            imgs.append(jpg)
            print('frame[{}] {}'.format(i, frame.shape))

        time.sleep(0.03)

    db['imgs'] = imgs
    db['data'] = data
    cap.release()
    db.close()
Project: 3DCNN    Author: bityangke
def video3d(self, filename, color=False, skip=True):
        cap = cv2.VideoCapture(filename)
        nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        if skip:
            frames = [x * nframe / self.depth for x in range(self.depth)]
        else:
            frames = [x for x in range(self.depth)]
        framearray = []

        for i in range(self.depth):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
            ret, frame = cap.read()
            frame = cv2.resize(frame, (self.height, self.width))
            if color:
                framearray.append(frame)
            else:
                framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

        cap.release()
        return np.array(framearray)
Project: yolo_tensorflow    Author: hizhangp
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/person.jpg'
    detector.image_detector(imname)
Project: MultiObjectTracker    Author: alokwhitewolf
def get_fps(source, Videolength):
    cap = cv2.VideoCapture(source)
    frame_counter = 0
    print "Calculating Frames per second . . . "

    while (True):
        # Capture frame-by-frame

        ret, frame = cap.read()
        if not ret:
            break

        frame_counter += 1

    cap.release()
    cv2.destroyAllWindows()
    fps = float(frame_counter) / Videolength
    print "\nFPS is " +str(fps)+"\n"

    return fps

#Algorithm to check intersection of line segments
#It iteratively checks for intersection between a pair of points (the vehicle's last locations) and pairs of points from another list (the pedestrian path)
Project: MultiObjectTracker    Author: alokwhitewolf
def get_fps(source, Videolength):
    cap = cv2.VideoCapture("docs/video/traffic2")
    frame_counter = 0
    print "Calculating Frames per second . . . "

    while (True):
        # Capture frame-by-frame

        ret, frame = cap.read()
        if not ret:
            break

        frame_counter += 1

    cap.release()
    cv2.destroyAllWindows()
    fps = float(frame_counter) / Videolength
    print "\nFPS is " +str(fps)+"\n"

    return fps

#Algorithm to check intersection of line segments
#It iteratively checks for intersection between a pair of points (the vehicle's last locations) and pairs of points from another list (the pedestrian path)
Project: temporal-segment-networks    Author: yjxiong
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list
Project: Video-Classification-Action-Recognition    Author: qijiezhao
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list
Project: video_labeler    Author: hahnyuan
def __init__(self,labels,video_file,box_saver,border=30):
        """
        the GUI Labeler
        :param labels: the labels name string list
        :param video_file: the video file path
        :param border: the border of the center clip filed (white line around the video)
        :param save_dir: label result save path
        :param save_im: if write every cropped image to each label directory
        """
        self.cam = cv2.VideoCapture(video_file)
        self.video_stat = VideoStat(border)
        self.label_stat = LabelStat(labels)
        self.labels=labels
        self.box_saver=box_saver
        cv2.setMouseCallback("video", self.video_click)
        cv2.setMouseCallback("label", self.label_click)
        self.run()
Project: Automatic-Plate-Number-Recognition-APNR    Author: kagan94
def process_video(path_to_video):
    cap = cv2.VideoCapture(path_to_video)  # Load video

    while True:
        ret, frame = cap.read()
        print frame
        if ret is False or (cv2.waitKey(30) & 0xff) == 27: break  # Exit if the video ended

        mask = np.zeros_like(frame)  # init mask
        contours = find_contours(frame)
        plates, plates_images, mask = find_plate_numbers(frame, contours, mask)

        print "Plate Numbers: %s" % ", ".join(plates)

        processed_frame = cv2.add(frame, mask)  # Apply the mask to image
        cv2.imshow('frame', processed_frame)
    cv2.destroyAllWindows()
    cap.release()


###########################################
# Run The Program #########################
###########################################
Project: rekognition-video-utils    Author: awslabs
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        if count % inc == 0:
            success, image = vidcap.read()
            if success:
                cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                if fmt == 'PIL':
                    im = Image.fromarray(cv2_im)
                #elif fmt == 'DISK':
                    #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
                else:
                    im = cv2_im
                yield count, im 
            else:
                break
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2]
Project: Controller-Hand    Author: ardamavi
def main():
    # Get Model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    # Get camera:
    cap = cv2.VideoCapture(0)

    # Open game in browser:
    open_game(browser='chrome', url='http://apps.thecodepost.org/trex/trex.html')

    while 1:
        # Get image from camera:
        ret, img = cap.read()
        Y = predict(model, img)
        if Y == 0:
            release()
        elif Y == 1:
            press()
    cap.release()
Project: faceNet_RealTime    Author: jack55436001
def main(args):

    saveFace = None;
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces) > 0:
            saveFace = frame
            break;
        # Display the resulting frame
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg',frame)

    mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    myImage = []
    for file in onlyfiles:
        isImage = None
        file = mypath + '/' + file
        isImage = imghdr.what(file)
        if isImage != None:
            myImage.append(file)

    #begin facenet
    cp.main(args,myImage);
Project: serbian-alpr    Author: golubaca
def start(self):
        """
        Create stream object.
        :return: stream
        """

        if self.protocol == "image":
            image = cv2.imread(self.ip_address, 1)
            plate = self.analize_plate.proccess(
                cv2.imencode('.jpg', image)[1].tostring())
            if plate:
                print plate['results']
        else:
            stream = cv2.VideoCapture(self.url)

            self.proccess(stream)
            # return stream
Project: party-pi    Author: JustinShenk
def initialize_webcam(self):
        """ Initialize camera and screenwidth and screenheight.
        """
        device = 'raspberry' if 'raspberrypi' in os.uname() else None
        self.raspberry = True if 'raspberry' == device else False
        if self.piCam:
            camera = self.setup_picamera()
            self.piCamera = camera
            return

        cam = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            try:
                _, frame = cam.read()
                # Update class variables.
                self.screenheight, self.screenwidth = frame.shape[:2]
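                # Property ids 3 and 4 are CAP_PROP_FRAME_WIDTH and CAP_PROP_FRAME_HEIGHT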
                cam.set(3, self.screenwidth)
                cam.set(4, self.screenheight)
            except:
                pass
        self.cam = cam
        return
Project: motorized_zoom_lens    Author: Kurokesu
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)

    while(running):
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]

        blur = get_blur(img, 0.05)
        frame["blur"] = blur

        if queue.qsize() < 10:
            queue.put(frame)
        else:
            print(queue.qsize())
Project: CanLauncher    Author: hazenhamather
def main():
    # cap = cv2.VideoCapture(0)
    os.system("cd /dev")
    os.system("v4l2-ctl --set-fmt-video=width=1920,height=1080,pixelformat=1")
    os.system("cd ~/CanLauncher")

    os.system("config-pin -a P9_14 pwm")
    os.system("config-pin -a P9_21 pwm")
    os.system("config-pin -a P9_22 pwm")

    GPIO.setup(startButton, GPIO.IN)
    GPIO.setup(confirmButton, GPIO.IN)
    # GPIO.setup(launchButton, GPIO.IN)

    time.sleep(0.5)

    boom()
Project: SOLAMS    Author: aishmittal
def startCapture(self):
        global new_user_added
        if new_user_added == True:

            self.initDir()
            self.capturing = True
            self.capture = cv2.VideoCapture(camera_port)
            self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.video_size.width())
            self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.video_size.height())

            self.timer = QtCore.QTimer()
            self.timer.timeout.connect(self.display_video_stream)
            self.timer.start(30)

        else:
            self.messageLbl.setText('Warning: First create new user')
Project: meleedb-segment    Author: sashahashi
def spaced_frames(parser, start=None, end=None, interval=None, num_samples=None, fuzz=4):
    if (interval is None and num_samples is None) or None not in (interval, num_samples):
        raise ValueError('exactly one of (interval, num_samples) must be set')

    vc = cv2.VideoCapture(parser.stream)
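    # Property ids: get(7) is CAP_PROP_FRAME_COUNT, get(5) is CAP_PROP_FPS; set(0, ...) below seeks by CAP_PROP_POS_MSEC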
    video_length = vc.get(7) / vc.get(5)
    if not start or start < 0:
        start = 0
    if not end or end > video_length:
        end = video_length

    total_time = end - start

    if not num_samples:
        num_samples = total_time // interval

    for time in np.linspace(start, end, num=num_samples):
        time += randint(-1 * fuzz, fuzz) / vc.get(5)
        time = min([max([0, time]), total_time])
        vc.set(0, int(time * 1000))
        success, frame = vc.read()

        if success:
            yield (time, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    return
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    # note: the frame count reported by OpenCV is not always exact
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    pdb.set_trace()
    tensor = tensor_1[:,:count,:,:]
    return tensor
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    return tensor_1
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    tensor = tensor_1[:,:count,:,:]
    return tensor
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    return tensor_1
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    tensor = tensor_1[:,:count,:,:]
    return tensor
Project: c3d_ucf101_siamese_yilin    Author: fxing328
def create_tensor(file1,mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3,len_1,112,112])
    count = 0
    ret = True
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:,count,:,:] = np.swapaxes(cv2.resize(cropImg(frame_1),(112,112)),0,2) - mean_array
            count = count+1
            print count
        else:
            break
    tensor = tensor_1[:,:count,:,:]
    return tensor
Project: single_shot_multibox_detector    Author: oarriaga
def start_video(self, model):
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                continue
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows()
Project: robotics1project    Author: pchorak
def initialize(self):
        # Initialize video capture
        self.cap = cv2.VideoCapture(self.ID)

        frameRate = 20.0
        frameWidth = 640
        frameHeight = 480

        if cv2.__version__[0] == "2":
            # Latest Stable Version (2.x)
            self.cap.set(cv2.cv.CV_CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameHeight)
        else:
            # version 3.1.0 (BETA)
            self.cap.set(cv2.CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)

        self.thresh = 0.4
        self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8)
Project: FindYourCandy    Author: BrainPad
def capture(self):
        capture = cv2.VideoCapture(self.device)
        capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)

        if not capture.isOpened():
            raise Exception('Failed to open camera capture.')

        for _ in range(0, 10):
            ret, img = capture.read()
            if not ret or self._blur_index(img) < self.blur_thres:
                time.sleep(0.5)
                continue
            capture.release()
            return img

        capture.release()
        raise Exception('Failed to capture image.')
Project: Face-recognition-test    Author: jiangwei1995910
def start():
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        FaceArray=getFaceArray(frame)
        img2=frame
        for r in FaceArray :
            img2=cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]  # Crop the detected face region
            feature=Tools.get_feature(img3)
            name=readFace(feature)
            font=cv2.FONT_HERSHEY_SIMPLEX
            img2= cv2.putText(img2,name,(r[1],r[3]), font, 1,(255,255,255),2)

        cv2.imshow('frame',img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Project: commercials_project    Author: BryceLuna
def get_frames(file_str):
    '''
    string => None
    This function takes in the source of a video, samples from
    the video and writes those samples to a folder
    '''
    vid = cv2.VideoCapture(file_str)

    if vid.isOpened():
        frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        step_size = int(1/float(pct_frames))

        for count in xrange(0,frame_count,step_size):
            w_path = write_path(file_str,count)
            vid.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,count)
            ret, frame = vid.read()
            cv2.imwrite(w_path,frame)
            count+=step_size
        vid.release()
    else:
        print 'unable to open file: {}'.format(file_str)
Project: commercials_project    Author: BryceLuna
def get_frames(file_str):
        '''
        string => None
        This function takes in the source of a video, samples from
        the video and writes those samples to a folder
        '''
        vid = cv2.VideoCapture(file_str)

        if vid.isOpened():
            frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            step_size = int(1/float(pct_frames))

            for count in xrange(0,frame_count,step_size):
                w_path = write_path(file_str,count)
                vid.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,count)
                ret, frame = vid.read()
                count+=step_size
                return frame
            vid.release()
        else:
            print 'unable to open file: {}'.format(file_str)
Project: commercials_project    Author: BryceLuna
def read_video(self):

        vid = cv2.VideoCapture(self.video_path)

        if vid.isOpened():

            frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            self.predictions = np.zeros((frame_count,100,100,3))#need to know frame size
            for count in xrange(frame_count):
                ret,frame = vid.read() #probably don't want to get every frame
                processed_frame = self.process_frame(frame)
                self.predictions[count] = processed_frame
            vid.release()
        else:
            print 'unable to open file: {}'.format(self.video_path)


    #maybe should separate this algo, or somehow automatically detect what the model accepts
    #should probably convert to float32, divide by 255.
Project: MyoSEMG    Author: LuffyDai
def __init__(self, name, ui=myo_emg.Ui_MainWindow(), cap=capture.capture()):
        super(VideoThread, self).__init__()
        self.flag = True
        self.start_flag = False
        self.support_flag = True
        self.name = name
        self.cap = cap
        self.ui = ui
        self.out = None
        self.stop_signal.connect(self.stop_play)
        self.image_siganl.connect(self.saving_video)
        self.start_signal.connect(self.start_capture)
        self.cap.path_signal.connect(self.save_video)
        if self.name == "Video":
            self.videoLabel = ui.Video
            self.camera = cv2.VideoCapture("instruction.mp4")
            self.fps = self.camera.get(cv2.CAP_PROP_FPS)
        elif self.name == "Camera":
            self.videoLabel = ui.Camera
            self.camera = cv2.VideoCapture(camera_port)
Project: LogoDetectionInVideo    Author: nmemme
def test(path):
    cap = cv2.VideoCapture(path)
    testing=[]
    while(True):
        ret, frame = cap.read()
        res=cv2.resize(frame,(250,250))

        gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        xarr=np.squeeze(np.array(gray_image).astype(np.float32))
        m,v=cv2.PCACompute(xarr)
        arr= np.array(v)
        flat_arr= arr.ravel()
        testing.append(flat_arr)
        #cv2.imshow('frame', frame)
        #if cv2.waitKey(1) & 0xFF == ord("q"):
         #   break
    #cap.release()
    #cv2.destroyAllWindows()
    logos=svm.predict(testing)
    uniqlogos=list(set(logos))
    for i in uniqlogos:
        print(i)
Project: face-recognition    Author: pratush07
def face_train_video(train_path,subject,max_train,stream):
    cap = cv2.VideoCapture(stream)
    ret=True
    ctr = 0
    # minimum 10 frames/images per video 
    while(ctr < max_train):
        # read till end of frames
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  
        cv2.imshow("Recognizing Face", img)
        cv2.waitKey(10)
        cv2.imwrite( join(train_path,subject)+ "." + str(ctr) +".jpg",img) # writes image  to disk
        ctr = ctr + 1
    cap.release()
    cv2.destroyAllWindows()

# predict live feed
Project: pdc-project    Author: ealain
def receive():
    '''
    1. Locate screen
    2. Follow the variations of intensity in the screen
    '''
    sampling_period = 1/SAMPLING_FREQUENCY
    f = open(EXCHANGE_FILE_PATH, 'w')
    f.write('')
    x,y,w,h = screen_position()
    if((x,y,w,h) == (-1,-1,-1,-1)):
        print("Unable to detect screen")
        return
    cap = cv2.VideoCapture(0)
    values = []
    try:
        while(True):
            ret, frame = cap.read()
            sub_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)[y:y+h, x:x+w]
            values.append(str(np.mean(sub_frame)))
    except KeyboardInterrupt:
        pass
    f.write('\n'.join(values))
    f.close()

    decode()
Project: Facial-Recognition-Tool    Author: JeeveshN
def recognize_video(face_recognizer):
    cap = cv2.VideoCapture(0)
    while True:
        if cap.grab():
            ref,image = cap.retrieve()
            image_grey=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            faces = FACE_CASCADE.detectMultiScale(image_grey,scaleFactor=1.16,minNeighbors=5,minSize=(25,25),flags=0)
            for x,y,w,h in faces:
                sub_img=image_grey[y:y+h,x:x+w]
                img=image[y:y+h,x:x+w]
                nbr,conf = face_recognizer.predict(sub_img)
                cv2.rectangle(image,(x-5,y-5),(x+w+5,y+h+5),(255, 255,0),2)
                cv2.putText(image,Data_list[nbr],(x,y-10), FONT, 0.5,(255,255,0),1)
            cv2.imshow("Faces Found",image)
        if (cv2.waitKey(1) & 0xFF == ord('q')) or (cv2.waitKey(1) & 0xFF == ord('Q')):
            break
    Datafile["Data"]=Data_list
    Datafile.close()
    cap.release()
    cv2.destroyAllWindows()
Project: Simple-stream-Kafka    Author: amwaleh
def video_emitter(video):
    # Open the video
    video = cv2.VideoCapture(video)
    print(' emitting.....')

    # read the file
    while video.isOpened():
        # read the image in each frame
        success, image = video.read()

        # check if the file has read the end
        if not success:
            break

        # encode the frame as PNG
        ret, jpeg = cv2.imencode('.png', image)
        # Convert the image to bytes and send to kafka
        producer.send_messages(topic, jpeg.tobytes())
        # To reduce CPU usage create sleep time of 0.2sec
        time.sleep(0.2)
    # clear the capture
    video.release()
    print('done emitting')
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def __init__(self, path, queue_size=128):
        self.stream = cv2.VideoCapture(path)
        self.exit = False

        self.queue = Queue(maxsize=queue_size)
Project: robik    Author: RecunchoMaker
def __init__(self, cubo, settings):
        """TODO: to be defined1. """
        self.camera_id = DEFAULT_CAMERA
        self.cap = cv2.VideoCapture(self.camera_id)
        self.settings = settings

        self.cubo = cubo
        self.lastmov = 0
        self.lastmovtam = 1
        self.status = ""

        self.reset()