Python cv2 模块,namedWindow() 实例源码

我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用cv2.namedWindow()

项目:Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    作者:taochenshh    | 项目源码 | 文件源码
def rgb_callback(self,data):
        """ROS image callback: display the RGB frame and, when a chessboard
        is found, estimate the RGB camera pose and dump it to YAML.

        data: sensor_msgs/Image expected in 'bgr8' encoding.
        Side effects: shows the 'rgb_img' window, sets self.rgb_tvec and
        self.rgb_rmat, writes rgb_camera_pose.yaml.
        """
        try:
            self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # Conversion failed; self.rgb_img keeps its previous value, so the
            # cvtColor below may operate on a stale frame.
            print(e)

        # Chessboard detection runs on grayscale.
        gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
        rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
        cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
        cv2.imshow('rgb_img',self.rgb_img)
        cv2.waitKey(5)
        if rgb_ret == True:
            rgb_tempimg = self.rgb_img.copy()
            # Refine corner locations to sub-pixel accuracy (criteria is module-level).
            cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)            
            cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
            # NOTE(review): 3-value unpacking matches the OpenCV 2.4
            # solvePnPRansac signature; OpenCV 3.x returns 4 values -- confirm
            # the cv2 version this project targets.
            rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
            self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
            print("The world coordinate system's origin in camera's coordinate system:")
            print("===rgb_camera rvec:")
            print(rgb_rvec)
            print("===rgb_camera rmat:")
            print(self.rgb_rmat)
            print("===rgb_camera tvec:")
            print(self.rgb_tvec)
            print("rgb_camera_shape: ")
            print(self.rgb_img.shape)

            # Inverse transform: camera pose in the world frame is
            # R_wc = R^T, t_wc = -R^T t.
            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.rgb_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

            # NOTE(review): the file handle is never explicitly closed.
            rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
            data = {'rmat':self.rgb_rmat.tolist(), 'tvec':self.rgb_tvec.tolist()}
            yaml.dump(data, rgb_stream)


            cv2.imshow('rgb_img',rgb_tempimg)
            cv2.waitKey(5)
项目:camera_calibration_frontend    作者:groundmelon    | 项目源码 | 文件源码
def run(self):
        """GUI thread loop: display queued calibration images until shutdown.

        Creates the main "display" window with a mouse callback and a scale
        trackbar, plus an optional "extra" window.  Keys: Esc or 'q' signals
        ROS shutdown, 's' takes a screendump of the current image.
        """
        cv2.namedWindow("display", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
        cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)

        if self.extra_queue:
            cv2.namedWindow("extra", cv2.WINDOW_NORMAL)

        while True:
            # wait for an image (could happen at the very beginning when the queue is still empty)
            while len(self.queue) == 0:
                time.sleep(0.1)
            # Peek (not pop) the most recent image; the producer presumably
            # replaces queue[0] -- TODO confirm against the feeding code.
            im = self.queue[0]
            cv2.imshow("display", im)
            k = cv2.waitKey(6) & 0xFF
            if k in [27, ord('q')]:
                rospy.signal_shutdown('Quit')
            elif k == ord('s'):
                self.opencv_calibration_node.screendump(im)
            if self.extra_queue:
                if len(self.extra_queue):
                    extra_img = self.extra_queue[0]
                    cv2.imshow("extra", extra_img)
项目:Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    作者:taochenshh    | 项目源码 | 文件源码
def ir_callback(self,data):
        """ROS IR-image callback: display the frame and, when a chessboard is
        found, estimate the IR camera pose and dump it to YAML.

        Side effects: shows the 'ir_img' window, sets self.ir_tvec and
        self.ir_rmat, writes ir_camera_pose.yaml.
        """
        try:
            self.ir_img = self.mkgray(data)
        except CvBridgeError as e:
            # Conversion failed; self.ir_img keeps its previous value.
            print(e)

        ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
        cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
        cv2.imshow('ir_img',self.ir_img)
        cv2.waitKey(5)
        if ir_ret == True:
            ir_tempimg = self.ir_img.copy()
            # Sub-pixel corner refinement (criteria defined at module level).
            cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)            
            cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
            # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
            # NOTE(review): 3-value unpacking matches the OpenCV 2.4
            # solvePnPRansac signature; OpenCV 3.x returns 4 values.
            ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
            self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

            print("The world coordinate system's origin in camera's coordinate system:")
            print("===ir_camera rvec:")
            print(ir_rvec)
            print("===ir_camera rmat:")
            print(self.ir_rmat)
            print("===ir_camera tvec:")
            print(self.ir_tvec)
            print("ir_camera_shape: ")
            print(self.ir_img.shape)

            # Inverse transform: camera pose in the world frame is
            # R_wc = R^T, t_wc = -R^T t.
            print("The camera origin in world coordinate system:")
            print("===camera rmat:")
            print(self.ir_rmat.T)
            print("===camera tvec:")
            print(-np.dot(self.ir_rmat.T, self.ir_tvec))

            # NOTE(review): the file handle is never explicitly closed.
            depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
            data = {'rmat':self.ir_rmat.tolist(), 'tvec':self.ir_tvec.tolist()}
            yaml.dump(data, depth_stream)


            cv2.imshow('ir_img',ir_tempimg)
            cv2.waitKey(5)
项目:MultiObjectTracker    作者:alokwhitewolf    | 项目源码 | 文件源码
def run(im):
    """Let the user drag two line segments on a copy of *im*.

    Opens a window, registers a mouse callback that records two lines
    (press/drag/release, twice), and returns once the user presses 'r'.
    Uses the module-level `trigger` counter for how many lines are done.

    Returns a list of two 2x2 uint32 arrays, each holding the endpoints
    of one drawn line.
    """
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")

    # Renamed from `list` (shadowed the builtin).
    lines = [np.empty((2, 2), np.uint32), np.empty((2, 2), np.uint32)]

    # Mutable cell so the nested callback can update the drag flag.  The
    # original declared `global mouse_down`, which never saw the local
    # `mouse_down = False` and could NameError on a stray button-up event.
    state = {'mouse_down': False}

    def callback(event, x, y, flags, param):
        global trigger  # number of completed lines (module-level counter)

        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                state['mouse_down'] = True
                lines[trigger][0] = (x, y)

            if event == cv2.EVENT_LBUTTONUP and state['mouse_down']:
                state['mouse_down'] = False
                lines[trigger][1] = (x, y)
                cv2.line(im_disp, (lines[trigger][0][0], lines[trigger][0][1]),
                         (lines[trigger][1][0], lines[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF

        if key == ord('r'):
            # 'r' resumes: hand the two drawn lines back to the caller.
            # (The original had an unreachable exit() after this return.)
            return lines
项目:CVMazeRunner    作者:M-Niedoba    | 项目源码 | 文件源码
def get_start_points(image):
    """Display the maze and let the user choose start/end points.

    'a' uses the ImageProcessor defaults; 's' lets the user click the
    points.  Blocks until a valid choice is made, then closes all windows.

    Returns (start_x, start_y, end_x, end_y).
    """
    # namedWindow returns None -- the original bound it to an unused var.
    cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    # Fixed garbled prompt (was "Please 'A' to use ...").
    print("Press 'A' to use default start and end points, or press 'S' to choose your own")

    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            # Fixed typo (was "End Pont").
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
项目:PyIntroduction    作者:tody411    | 项目源码 | 文件源码
def cvCaptureVideo():
    """Stream frames from the default camera until the user presses 'q'.

    Raises IOError if the capture device cannot be opened.
    """
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        # The original did `raise("IO Error")`, which raises a plain string
        # and is a TypeError on Python 3; raise a real exception instead.
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if not ret:
            # Frame grab failed; retry rather than crash.
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# NOTE: original comment was mojibake; it appears to describe displaying the
# camera capture with Matplotlib (e.g. in a web/notebook context).
项目:lan-ichat    作者:Forec    | 项目源码 | 文件源码
def run(self):
        """Accept one remote video client and display its frame stream.

        Wire format per frame: a native-size struct 'L' length prefix
        followed by a zlib-compressed pickled frame.  Exits when the viewer
        presses Esc.

        NOTE(review): struct.calcsize("L") is platform-dependent (4 vs 8
        bytes); sender and receiver must agree -- consider "=L".
        NOTE(review): pickle.loads on network data is unsafe if the peer is
        untrusted (arbitrary code execution).
        """
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate bytes until the fixed-size length prefix is complete.
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Accumulate bytes until the whole compressed frame has arrived.
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
项目:lan-ichat    作者:Forec    | 项目源码 | 文件源码
def run(self):
        """Accept one remote video client and display its frame stream.

        Wire format per frame: a native-size struct 'L' length prefix
        followed by a zlib-compressed pickled frame.  Exits when the viewer
        presses Esc.

        NOTE(review): struct.calcsize("L") is platform-dependent (4 vs 8
        bytes); sender and receiver must agree -- consider "=L".
        NOTE(review): pickle.loads on network data is unsafe if the peer is
        untrusted (arbitrary code execution).
        """
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate bytes until the fixed-size length prefix is complete.
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Accumulate bytes until the whole compressed frame has arrived.
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
项目:ATX    作者:NetEaseGame    | 项目源码 | 文件源码
def test_minicap():
    """Preview the Android screen at half size; 'd' dumps a PNG, Ctrl-C quits."""
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            # Floor division: cv2.resize requires integer sizes, and w/2 is
            # a float on Python 3 (TypeError there).
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
项目:Stereo-Pose-Machines    作者:ppwwyyxx    | 项目源码 | 文件源码
def dump_2dcoor():
    """Grab stereo frames, run the CPM network on both views, and dump the
    per-channel 2D argmax coordinates to pts<n>.npy (plus frame<n>.png from
    camera 0).  Stops after 10 iterations.
    """
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        # copy=False: wrap the camera buffer without copying.
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)

        o1, o2 = runner(m1, m2)
        # 14 output channels: one (left-view, right-view) argmax pair per
        # channel (presumably body joints -- confirm against the CPM model).
        pts = []
        for k in range(14):
            pts.append((argmax_2d(o1[:,:,k]),
                argmax_2d(o2[:,:,k])))
        pts = np.asarray(pts)
        np.save('pts{}.npy'.format(cnt), pts)
        cv2.imwrite("frame{}.png".format(cnt), m1);
        if cnt == 10:
            break
项目:nelpy    作者:nelpy    | 项目源码 | 文件源码
def pick_corrs(images, n_pts_to_pick=4):
    """Open one window per image and let the user pick correspondences.

    Each image gets a state record consumed by corr_picker_callback; picked
    points accumulate in the record's first slot.  Enter/Return finishes,
    'q' aborts.

    Returns a list of picked-point lists (one per image), or None on 'q'.
    """
    data = []
    for idx, image in enumerate(images):
        data.append([[], 0, False, False, False, image,
                     "Image %d" % idx, n_pts_to_pick])

    for record in data:
        title = record[6]
        cv2.namedWindow(title)
        cv2.setMouseCallback(title, corr_picker_callback, record)
        cv2.startWindowThread()
        cv2.imshow(title, record[5])

    key = None
    while key not in ('\n', '\r', 'q'):
        code = cv2.waitKey(33)
        # waitKey returns -1 on timeout; otherwise keep the low byte as char.
        key = chr(code & 255) if code >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    return [record[0] for record in data]
项目:AutomatorX    作者:xiaoyaojjian    | 项目源码 | 文件源码
def test_minicap():
    """Preview the Android screen at half size; 'd' dumps a PNG, Ctrl-C quits."""
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            # Floor division: cv2.resize requires integer sizes, and w/2 is
            # a float on Python 3 (TypeError there).
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
项目:AutonomousParking    作者:jovanduy    | 项目源码 | 文件源码
def __init__(self):
        """ Initialize the parking spot recognizer """

        #Subscribe to topics of interest
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)
        rospy.Subscriber('/cmd_vel', Twist, self.process_twist)

        # Initialize video images
        cv2.namedWindow('video_window')        
        self.cv_image = None # the latest image from the camera
        # Working buffers for the image pipeline; size fixed by IMG_SHAPE.
        self.dst =  np.zeros(IMG_SHAPE, np.uint8)
        self.arc_image = np.zeros(IMG_SHAPE, np.uint8)
        self.transformed = np.zeros(IMG_SHAPE, np.uint8)

        # Declare instance variables
        self.bridge = CvBridge() # used to convert ROS messages to OpenCV
        self.hsv_lb = np.array([0, 70, 60]) # hsv lower bound to filter for parking lines
        self.hsv_ub = np.array([30, 255, 140]) # hsv upper bound
        # Latest velocity state from /cmd_vel; None until the first message.
        self.vel = None
        self.omega = None
        self.color = (0,0,255) # BGR red, used for drawing
项目:tbotnav    作者:patilnabhi    | 项目源码 | 文件源码
def img_callback(self, image):
        """ROS image callback (Python 2): convert the frame to OpenCV, run
        the capture pipeline, display it, and -- once 100*cp_rate frames are
        counted -- train the Fisher model and shut the node down.
        """
        try:
            inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError, e:
            # Conversion failed; inImg stays unset and np.array below raises.
            print e    

        inImgarr = np.array(inImg)
        self.outImg = self.process_image(inImgarr)

        # self.train_img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))    

        # cv2.namedWindow("Capture Face")
        cv2.imshow('Capture Face', self.outImg)
        cv2.waitKey(3)

        # self.count is presumably incremented inside process_image -- TODO
        # confirm; when enough samples are captured, train and exit.
        if self.count == 100*self.cp_rate:
            rospy.loginfo("Data Captured!")
            rospy.loginfo("Training Data...")
            self.fisher_train_data()
            rospy.signal_shutdown('done')
项目:tbotnav    作者:patilnabhi    | 项目源码 | 文件源码
def __init__(self):
        """Set up the hand_gestures node: CV bridge, finger recognizer,
        depth-image subscriber and finger-count publisher."""
        self.node_name = "hand_gestures"
        rospy.init_node(self.node_name)

        # Release GUI/resources when ROS shuts down.
        rospy.on_shutdown(self.cleanup)

        # self.cv_window_name = self.node_name
        # cv2.namedWindow("Depth Image", 1)
        # cv2.moveWindow("Depth Image", 20, 350)

        self.bridge = CvBridge()
        self.numFingers = RecognizeNumFingers() 

        self.depth_sub = rospy.Subscriber("/asus/depth/image_raw", Image, self.depth_callback)
        # latch=True: late subscribers still receive the last published count.
        self.num_pub = rospy.Publisher('num_fingers', Int32, queue_size=10, latch=True)       
        # self.img_pub = rospy.Publisher('hand_img', Image, queue_size=10)
        rospy.loginfo("Waiting for image topics...")
项目:tbotnav    作者:patilnabhi    | 项目源码 | 文件源码
def img_callback(self, image):
        """ROS image callback (Python 2): convert the frame to OpenCV, run
        the capture pipeline, display it, and -- once 100*cp_rate frames are
        counted -- train the Fisher model and shut the node down.
        """
        try:
            inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
        except CvBridgeError, e:
            # Conversion failed; inImg stays unset and np.array below raises.
            print e    

        inImgarr = np.array(inImg)
        self.outImg = self.process_image(inImgarr)

        # self.train_img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))    

        # cv2.namedWindow("Capture Face")
        cv2.imshow('Capture Face', self.outImg)
        cv2.waitKey(3)

        # self.count is presumably incremented inside process_image -- TODO
        # confirm; when enough samples are captured, train and exit.
        if self.count == 100*self.cp_rate:
            rospy.loginfo("Data Captured!")
            rospy.loginfo("Training Data...")
            self.fisher_train_data()
            rospy.signal_shutdown('done')
项目:fathom    作者:rdadolf    | 项目源码 | 文件源码
def __init__(self, rom_name, vis,frameskip=1,windowname='preview'):
    """Wrap an ALE Atari emulator instance (Python 2).

    rom_name:   ROM filename appended to ROM_PATH.
    vis:        when truthy, open an OpenCV preview window.
    frameskip:  emulator frames skipped per action.
    windowname: preview window title.
    """
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode");
    self.ale.setInt("random_seed",123)
    self.ale.setInt("frame_skip",frameskip)
    romfile = str(ROM_PATH)+str(rom_name)
    # Missing ROM only prints a warning; loadROM below will still be called.
    if not os.path.exists(romfile):
      print 'No ROM file found at "'+romfile+'".\nAdjust ROM_PATH or double-check the filt exists.'
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    # Map ALE action id -> index into the minimal action set.
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
      self.action_map[self.legal_actions[i]] = i

    # print(self.legal_actions)
    self.screen_width,self.screen_height = self.ale.getScreenDims()
    print("width/height: " +str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
      cv2.startWindowThread()
      cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE) # permit manual resizing
项目:pynephoscope    作者:neXyon    | 项目源码 | 文件源码
def __init__(self, files):
        """Set up the image-comparison GUI for a list of image files.

        Requires at least two files.  Creates three windows (image,
        difference, threshold) plus trackbars for image selection and the
        difference threshold, then renders the initial state.
        """
        if len(files) < 2:
            raise Exception('Need at least two files to compare.')

        self.files = files
        self.current_image = 0

        # Window and trackbar titles.
        self.image_window = 'Image'
        self.threshold_window = 'Threshold'
        self.difference_window = 'Difference'
        self.tb_threshold = 'Threshold'
        self.tb_image = 'Image'

        # Lazily-filled state used by the render/selection callbacks.
        self.image1 = None
        self.image2 = None
        self.difference = None
        self.gray = None
        self.threshold = 25

        for title in (self.image_window, self.difference_window, self.threshold_window):
            cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)

        cv2.createTrackbar(self.tb_image, self.difference_window,
                           0, len(self.files) - 2, self.selectImage)
        cv2.createTrackbar(self.tb_threshold, self.threshold_window,
                           self.threshold, 255, self.renderThreshold)
        self.render()
项目:curly-fortnight    作者:sManohar201    | 项目源码 | 文件源码
def __init__(self):
        """Wire up the image pipeline and velocity publisher, then spin.

        NOTE(review): this constructor never returns until ROS shuts down --
        the publish loop runs inside __init__.
        """
        self.bridge = cv_bridge.CvBridge()
        cv2.namedWindow("input", 1)
        # Tunable parameters; `nothing` is a no-op trackbar callback.
        cv2.createTrackbar('param1', 'input', 10, 300, nothing)
        cv2.createTrackbar('param2', 'input', 15, 300, nothing)
        cv2.namedWindow("processed", 1)
        self.image_sb = rospy.Subscriber('/usb_cam/image_raw', Image, self.image_callback)

        # image_callback presumably mutates self.motion -- TODO confirm.
        self.motion = Twist()

        rate = rospy.Rate(20)

        # publish to cmd_vel of the jackal
        pub = rospy.Publisher("/jackal_velocity_controller/cmd_vel", Twist, queue_size=10)

        while not rospy.is_shutdown():
            # publish Twist
            pub.publish(self.motion)
            rate.sleep()
项目:NGImageProcessor    作者:artzers    | 项目源码 | 文件源码
def HistDemo(self):
        """Show lena, its histogram-equalized version, and a
        histogram-regulated (specification) version in three windows."""
        img = cv2.imread("lena.jpg")
        # invertImg=self.InvertImage(img)
        # logImg = self.LogImage(img, 10)
        # powerImg = self.PowerImage(img, 2.0)
        eqimg = self.HistEQ(img)
        # gray = baseTransfer.RGB2Gray(img)
        # eqgray = baseTransfer.HistEQ(gray)

        # Regulate the equalized image toward the original's histogram
        # (histogram specification); regMap is the mapping, unused here.
        reghist = self.ImhistColor(img, 255)
        dImg, regMap = self.RegulateHist(eqimg, reghist)

        cv2.namedWindow("lena")
        cv2.imshow("lena", img)
        cv2.namedWindow("eq")
        cv2.imshow("eq", eqimg)
        cv2.namedWindow("reg")
        cv2.imshow("reg", dImg)
        cv2.waitKey(0)
项目:NGImageProcessor    作者:artzers    | 项目源码 | 文件源码
def WienerFilterDemo(self, img):
        """Degrade *img* with a frequency-domain blur H plus Gaussian noise,
        then restore it with a Wiener filter and display the three stages.

        NOTE(review): uses the true noise/image power spectra (Sn, Sf),
        which are only available in a demo setting.
        """
        img = np.int16(img)
        noise = self.noiseGenerator.GuassNoise(img, 0, 10, np.int32(img.size))
        # nImg is computed but never used below.
        nImg = img + noise
        # Power spectra of noise and image (centered FFTs).
        fn = np.fft.fftshift(np.fft.fft2(noise))
        Sn = np.abs(fn) ** 2.0
        ffImg = np.fft.fftshift(np.fft.fft2(img))
        Sf = np.abs(ffImg) ** 2.0
        H = self.GenerateHDemo(img.shape)
        # Degraded image: blur applied in the frequency domain, then noise.
        fImg = np.fft.fftshift(np.fft.fft2(img))
        gImg = np.fft.ifft2(np.fft.ifftshift(fImg * H))
        gImg += noise
        fgImg = np.fft.fftshift(np.fft.fft2(gImg))
        # Wiener transfer function; the actual inversion/restoration goes
        # through IdeaLowHInverse -- TODO confirm its semantics.
        wH = (H * (np.abs(H) ** 2.0 + Sn / Sf)) / np.abs(H) ** 2.0
        H1 = self.IdeaLowHInverse(wH, 500)
        ggImg = np.fft.ifft2(np.fft.ifftshift(fgImg * H1))
        cv2.namedWindow("orig")
        cv2.imshow("orig", np.uint8(img))
        cv2.namedWindow("g")
        cv2.imshow("g", np.uint8(gImg))
        cv2.namedWindow("Wiener filter restore")
        cv2.imshow("Wiener filter restore", np.uint8(ggImg))
        cv2.waitKey(0)
项目:NGImageProcessor    作者:artzers    | 项目源码 | 文件源码
def FFTDemo(self):
        """Round-trip the lena image through FFT/IFFT (Python 2: xrange) and
        display the spectrum and the reconstructed image."""
        img = cv2.imread("lena.jpg")
        baseTransfer = BasePixelTransfer.BasePixelTransfer()
        img = baseTransfer.RGB2Gray(img)
        fimg = img.copy()
        fimg = np.float32(fimg)
        # for i in xrange(0, img.shape[0]):
        #     for j in xrange(0, img.shape[1]):
        #         fimg[i, j] *= (-1) ** (i + j)

        fftimg = np.fft.fftshift(np.fft.fft2(fimg))
        ifft = np.real(np.fft.ifft2(fftimg))
        # NOTE(review): the (-1)^(i+j) factor appears to compensate for
        # taking ifft2 of the fftshift-ed (uncentered back) spectrum --
        # verify against np.fft.ifftshift usage elsewhere in this class.
        for i in xrange(0, ifft.shape[0]):
            for j in xrange(0, ifft.shape[1]):
                ifft[i, j] *= (-1) ** (i + j)
        cv2.namedWindow("lena")
        cv2.imshow("lena", img)
        cv2.namedWindow("fft")
        cv2.imshow("fft", np.real(fftimg))
        cv2.imshow("ifft", np.uint8(ifft))
        cv2.waitKey(0)
项目:NGImageProcessor    作者:artzers    | 项目源码 | 文件源码
def LaplaceFFTDemo(self):
        """Apply a Laplacian filter in the frequency domain and display both
        the filtered spectrum and its inverse transform.

        Each FFT coefficient is scaled by -1e-5 * ((i-256)^2 + (j-256)^2),
        the Laplacian transfer function centered on a 512x512 spectrum.
        """
        origfftimg = self.PrepareFFT()
        fftimg = origfftimg.copy()
        # (Removed unused locals `sz` and `center` and dead commented code;
        # the center 256 is hard-coded to match the 512x512 loop bounds.)
        for i in xrange(0, 512):
            for j in xrange(0, 512):
                # -((i-256)^2 + (j-256)^2), scaled by 1e-5 to keep the
                # display within a visible range.
                fftimg[i, j] *= -0.00001 * (np.float64(i - 256) ** 2.0 + np.float64(j - 256) ** 2.0)
        ifft = self.GetIFFT(fftimg)
        cv2.namedWindow("fft")
        cv2.imshow("fft", np.real(fftimg))
        cv2.namedWindow("ifft")
        cv2.imshow("ifft", ifft)
        cv2.waitKey(0)
项目:NGImageProcessor    作者:artzers    | 项目源码 | 文件源码
def GuassNoiseDemo(self, img, miu, theta, noiseNum):
        """Add Gaussian noise to *img* and display original, noise, and sum.

        miu/theta are forwarded to GuassNoise (presumably mean and standard
        deviation -- confirm); noiseNum is the number of noise samples.
        """
        # sz is only referenced by the commented-out manual sampling below.
        sz = img.shape
        noiseImg = self.GuassNoise(img, miu, theta, noiseNum)
        gImg = img + noiseImg
        #pdf = self.GuassianPDF(miu, theta)
        # for i in xrange(0, noiseNum):
        #     x = np.random.random() * (sz[0] - 1)
        #     y = np.random.random() * (sz[1] - 1)
        #     noise = self.PDFMap(pdf, np.random.random())
        #     print noise
        #     noiseImg[x, y] = noiseImg[x, y] + noise
        cv2.namedWindow("lena")
        cv2.namedWindow("gauss noise")
        cv2.namedWindow("dst")
        cv2.imshow("lena", img)
        cv2.imshow("gauss noise", np.uint8(noiseImg))
        cv2.imshow("dst", np.uint8(gImg))
        cv2.waitKey(0)
项目:ssd_tensorflow    作者:seann999    | 项目源码 | 文件源码
def evaluate_images():
    """Run the trained SSD over random COCO test images and draw detections.

    NOTE(review): `test_batches.next()` is Python 2 iterator syntax; use
    next(test_batches) on Python 3.
    """
    ssd = SSD()

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    # i2name maps class index -> class name for drawing labels.
    global i2name
    i2name = loader.i2name

    while True:
        batch = test_batches.next()
        imgs, anns = loader.preprocess_batch(batch)
        pred_labels_f, pred_locs_f, step = ssd.sess.run([ssd.pred_labels, ssd.pred_locs, ssd.global_step],
                                                        feed_dict={ssd.imgs_ph: imgs, ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        # wait=0 blocks until a key is pressed between images.
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)
项目:ssd_tensorflow    作者:seann999    | 项目源码 | 文件源码
def show_webcam(address):
    """Stream frames from the webcam server at *address* and continuously
    draw SSD detections on them."""
    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()

    ssd = SSD()

    # Class-index -> name mapping used when drawing labels.
    # NOTE(review): the file handle from open(...) is never closed.
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)

    boxes_ = None
    confidences_ = None

    while True:
        sample = cam.image
        # skimage.transform.resize yields floats scaled to the network input size.
        resized_img = skimage.transform.resize(sample, (image_size, image_size))

        pred_labels_f, pred_locs_f = ssd.sess.run([ssd.pred_labels, ssd.pred_locs],
                                                        feed_dict={ssd.imgs_ph: [resized_img], ssd.bn: False})

        # Previous boxes/confidences are fed back in -- presumably for
        # temporal smoothing; see matcher.format_output.
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0], boxes_, confidences_)

        # Map box coordinates from network scale back to the raw frame.
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)
项目:Face-Recognition-for-Mobile-Robot    作者:gagolucasm    | 项目源码 | 文件源码
def gui():
    """Render a 6x4 grid of face thumbnails plus captions into a 'GUI' window."""
    size=100
    img = np.zeros((1000,700,3), np.uint8)
    cv2.namedWindow('GUI')
    xmar=ymar=50
    for i in range(6):
        for j in range(4):
            # NOTE(review): i+j+1 repeats indices across cells (e.g. i=1,j=0
            # and i=0,j=1 both load cara2); i*4+j+1 was probably intended.
            img1 = cv2.imread("faces/cara"+str(i+j+1)+".JPEG")
            img1=resize(img1,width = size,height=size)
            # Paste the thumbnail into its grid cell; images that did not
            # resize to exactly 100x100 keep their own extent.
            if (img1.shape[0] == 100 and img1.shape[1] == 100):
                img[ymar:ymar+size, xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+size)] = img1
            else:
                img[ymar:ymar+img1.shape[0], xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+img1.shape[1])] = img1
        ymar+=150
    cv2.putText(img, "Presiona Q para salir", (5, 25),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "TFG Lucas Gago", (500, 925),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "Version 3", (500, 950),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.imshow('GUI',img)
项目:atari-human-checkpoint-replay    作者:ionelhosu    | 项目源码 | 文件源码
def __init__(self, rom_name, vis,windowname='preview'):
        """Wrap an ALE Atari emulator.

        rom_name:   ROM file name under roms/.
        vis:        when truthy, open an OpenCV preview window.
        windowname: preview window title.
        """
        self.ale = ALEInterface()
        self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode");
        self.ale.setInt("random_seed",123)
        self.ale.setInt("frame_skip",4)
        self.ale.loadROM('roms/' + rom_name )
        self.legal_actions = self.ale.getMinimalActionSet()
        # Map ALE action id -> index into the minimal action set.
        self.action_map = dict()
        self.windowname = windowname
        for i in range(len(self.legal_actions)):
            self.action_map[self.legal_actions[i]] = i
        self.init_frame_number = 0

        # print(self.legal_actions)
        self.screen_width,self.screen_height = self.ale.getScreenDims()
        print("width/height: " +str(self.screen_width) + "/" + str(self.screen_height))
        self.vis = vis
        if vis: 
            cv2.startWindowThread()
            cv2.namedWindow(self.windowname)
项目:CameraTablet    作者:dmvlasenk    | 项目源码 | 文件源码
def setFingerTemplate(big_image, name_template_file):
    """Let the user crop a finger template out of big_image with the mouse.

    save_corners (mouse callback defined elsewhere) presumably records the
    clicked corners in the module-level corners_x/corners_y and draws into
    add_frame -- TODO confirm.  Esc cancels; 's' saves the selected region
    to name_template_file once two corners have been picked.
    """
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    # Overlay buffer the callback draws the selection rectangle into.
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows()
项目:CameraTablet    作者:dmvlasenk    | 项目源码 | 文件源码
def setCameraProperties():
    """Open the driver's camera settings dialog and preview frames until Esc."""
    name_window = 'Press esc after finish'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    # Pops the driver's property dialog so the user can tune the camera.
    cap.set(cv2.CAP_PROP_SETTINGS, 0)
    ret, frame_from = cap.read()

    while cap.isOpened():
        ret, frame_from = cap.read()
        if not ret:
            # Bug fix: the original flipped the frame BEFORE checking ret,
            # so a failed read crashed on cv2.flip(None, -1).
            break
        # Flip both axes (presumably the camera is mounted inverted --
        # matches the flip in getFrameFromCamera).
        frame = cv2.flip(frame_from, -1)
        cv2.imshow(name_window, frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
项目:CameraTablet    作者:dmvlasenk    | 项目源码 | 文件源码
def getFrameFromCamera():
    """Preview the camera; 's' returns the current frame, Esc aborts.

    Returns the selected (flipped) frame, or the initial empty list [] if
    the user exits without selecting one.
    """
    name_window = 'camera window'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    ret, frame_from = cap.read()
    output = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Bug fix: the original flipped the frame BEFORE checking ret,
            # so a failed read crashed on cv2.flip(None, -1).
            break
        frame = cv2.flip(frame, -1)  # flip both axes
        cv2.imshow(name_window, frame)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:  # Esc: quit without selecting
            break
        if cur_key == ord('s'):  # 's': keep this frame
            output = frame
            break
    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
    return output
项目:SmartCap    作者:TusharChugh    | 项目源码 | 文件源码
def run_main():
    """Capture loop (Python 2): warm the camera up, then save one image per
    second via take_save_image until interrupted."""
    # First initialize the camera capture object with the cv2.VideoCapture class.
    camera = cv2.VideoCapture(camera_port)

    #To create a new window for display
    #Disabled it for now as we can't see the image on the cap
    #cv2.namedWindow(display, cv2.WINDOW_NORMAL)    

    # Discard the first few frame to adjust the camera light levels
    for i in xrange(ramp_frames):
        temp = get_image(camera)

    #Now take the image and save it after every 1 second
    while(1):
        try:
            time.sleep(1)
            take_save_image(camera)
        except Exception, e:
            # Best-effort: log the error and keep capturing.
            print "Exception occured \n"
            print e
            pass
项目:vae-style-transfer    作者:sunsided    | 项目源码 | 文件源码
def load_images(queue: PriorityQueue,
                source: int,
                file_path: str,
                target_width: int,
                target_height: int,
                display_progress: bool=False):
    """Recursively read every .jpg under *file_path*, resize each image to
    (target_width, target_height), and push it onto *queue* tagged with
    *source*.  Priorities are random, so consumers pop files in shuffled
    order.  With display_progress, previews each image; Esc stops early.
    """
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)

    pattern = path.join(file_path, '**', '*.jpg')
    for file in iglob(pattern, recursive=True):
        image = cv2.imread(file)
        image = cv2.resize(image, (target_width, target_height),
                           interpolation=cv2.INTER_AREA)

        # Random priority => shuffled consumption order.
        queue.put((random(), (image, source)))

        if display_progress:
            cv2.imshow(window, image)
            if (cv2.waitKey(33) & 0xff) == 27:
                break

    if display_progress:
        cv2.destroyWindow(window)
项目:CVMazeRunner    作者:M-Niedoba    | 项目源码 | 文件源码
def setupWindow():
    """Load a user-selected maze image, solve it, and display the result.

    Shows the unsolved maze if no solution exists; otherwise draws the
    solution path with start/end markers.  Waits for a key, then closes
    all windows.
    """
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image, granularity)
    solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), solvedImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        # namedWindow returns None; the original bound it to an unused var.
        cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    # Bug fix: the original referenced cv2.destroyAllWindows without
    # calling it, so windows were never destroyed.
    cv2.destroyAllWindows()
项目:virtual-dressing-room    作者:akash0x53    | 项目源码 | 文件源码
def __init__(self):
        """Allocate the 800x600 working buffers for color detection.

        The commented-out window/trackbar code suggests the channel
        thresholds were once tuned interactively; they are now fixed at 0.
        """
        # Normalized RGB frame and single-channel result buffer.
        self.norm_rgb=np.zeros((600,800,3),np.uint8)
        self.dst=np.zeros((600,800),np.uint8)
        # Per-channel thresholds (presumably upper bounds -- see the
        # commented trackbars below).
        self.b=0
        self.g=0
        self.r=0

        # Per-channel thresholds (presumably lower bounds).
        self.lb=0
        self.lg=0
        self.lr=0

        # Mask buffer.
        self.m=np.zeros((600,800),np.uint8)
        #self.win=cv2.namedWindow("detect")
        #self.dst=cv.CreateImage((800,600),8,1)

        #cv2.createTrackbar("blue", "detect",0,255,self.change_b)
        #cv2.createTrackbar("green","detect",0,255,self.change_g)
        #cv2.createTrackbar("red","detect",0,255,self.change_r)

        #cv2.createTrackbar("low_blue", "detect",0,255,self.change_lb)
        #cv2.createTrackbar("low_green","detect",0,255,self.change_lg)
        #cv2.createTrackbar("low_red","detect",0,255,self.change_lr)
项目:virtual-dressing-room    作者:akash0x53    | 项目源码 | 文件源码
def __init__(self):
        """Allocate 800x600 working buffers and seed the hue value from the
        module-level ``color`` variable.
        """
        global color
        # Two 3-channel colour buffers plus a single-channel mask, all zeroed.
        self.rgb = np.zeros((600, 800, 3), np.uint8)
        self.scratch = np.zeros((600, 800, 3), np.uint8)
        self.mask = np.zeros((600, 800), np.uint8)
        self.hue_val = color
        # (The original "hue" window and its trackbar were left disabled.)
项目:moVi    作者:netsecIITK    | 项目源码 | 文件源码
def __init__(self, name):
        """Remember the window title and create the OpenCV window up front."""
        self.frame_name = name
        cv2.namedWindow(self.frame_name)
项目:pybot    作者:spillai    | 项目源码 | 文件源码
def trackbar_create(label, win_name, v, maxv, scale=1.0):
    """Register a new trackbar in the global ``trackbars`` table and attach it
    to *win_name* with initial value *v* and maximum *maxv*.

    Raises RuntimeError if a trackbar with the same label already exists.
    """
    global trackbars
    if label in trackbars:
        raise RuntimeError('Duplicate key. %s already created' % label)
    # Record the trackbar's state so trackbar_update can find it later.
    trackbars[label] = {
        'label': label,
        'win_name': win_name,
        'value': v,
        'scale': scale,
    }
    cv2.namedWindow(win_name)
    cv2.createTrackbar(label, win_name, v, maxv, trackbar_update)
项目:pybot    作者:spillai    | 项目源码 | 文件源码
def mouse_event_create(win_name, cb):
    """Create (or reuse) the window *win_name* and install *cb* as its
    OpenCV mouse-event callback.
    """
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, cb)
项目:opencv-gui-helper-tool    作者:maunesh    | 项目源码 | 文件源码
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
        """Interactive edge-detection tuner.

        Opens an 'edges' window with three trackbars (threshold1, threshold2,
        filter_size), re-rendering via self._render() on every change, and
        blocks until a key is pressed.

        :param image: source image to run edge detection on
        :param filter_size: initial smoothing-kernel size (forced odd)
        :param threshold1: initial lower Canny threshold (0-255)
        :param threshold2: initial upper Canny threshold (0-255)
        """
        self.image = image
        self._filter_size = filter_size
        self._threshold1 = threshold1
        self._threshold2 = threshold2

        def onchangeThreshold1(pos):
            self._threshold1 = pos
            self._render()

        def onchangeThreshold2(pos):
            self._threshold2 = pos
            self._render()

        def onchangeFilterSize(pos):
            self._filter_size = pos
            # Smoothing kernels must be odd: bump even sizes to the next odd.
            self._filter_size += (self._filter_size + 1) % 2
            self._render()

        cv2.namedWindow('edges')

        cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
        cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
        cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)

        self._render()

        # Fixed: was a Python 2 `print` statement, inconsistent with the
        # print() function used elsewhere in this file and a SyntaxError
        # under Python 3.
        print("Adjust the parameters as desired.  Hit any key to close.")

        cv2.waitKey(0)

        cv2.destroyWindow('edges')
        cv2.destroyWindow('smoothed')
项目:CE264-Computer_Vision    作者:RobinCPC    | 项目源码 | 文件源码
def __init__(self, video_src):
        """Open the capture source, create the main window, and initialise all
        state used by the gesture-recognition loop.
        """
        self.cam = cv2.VideoCapture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('gesture_hci')

        # Channel range used for skin detection; the bracketed values in the
        # trailing comments are earlier calibrations kept for reference.
        self.mask_lower_yrb = np.array([44, 131, 80])    # [54, 131, 110]
        self.mask_upper_yrb = np.array([163, 157, 155])  # [163, 157, 135]
        # Trackbar-driven skin calibration starts switched off.
        self.calib_switch = False

        # Background subtractor.  NOTE(review): this is the OpenCV 2.x API;
        # OpenCV 3+ renamed it to cv2.createBackgroundSubtractorMOG2.
        self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)

        # Dynamic ROI: initial position plus tracking toggle.
        self.ROIx = 200
        self.ROIy = 200
        self.track_switch = False
        # Previous ROI-centroid position (None until the first detection).
        self.preCX = None
        self.preCY = None

        # Rolling record of the last few gesture commands.
        self.last_cmds = FixedQueue()

        # Reference images used when detecting single-finger gestures.
        self.fin1 = cv2.imread('./test_data/index1.jpg')
        self.fin2 = cv2.imread('./test_data/index2.jpg')
        self.fin3 = cv2.imread('./test_data/index3.jpg')

        # Mouse-input control starts disabled.
        self.cmd_switch = False

        # Frame counter, kept for debugging.
        self.n_frame = 0


# On-line Calibration for skin detection (bug, not stable)
项目:camera_calibration_frontend    作者:groundmelon    | 项目源码 | 文件源码
def display(win_name, img):
    """Show *img* in a resizable window named *win_name* and block until a
    key is pressed; Esc or 'q' also shuts down the ROS node.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    key = -1
    # waitkey() returns -1 until an actual key press arrives.
    while key == -1:
        key = waitkey()
    cv2.destroyWindow(win_name)
    if key in (27, ord('q')):
        rospy.signal_shutdown('Quit')
项目:camera_calibration_frontend    作者:groundmelon    | 项目源码 | 文件源码
def __init__(self, topic):
        """Subscribe to an image *topic* and open a display window for it."""
        self.topic = topic
        self.windowNameOrig = "Camera: {0}".format(self.topic)
        # NOTE(review): the literal 2 is passed as the namedWindow flags
        # argument -- presumably a window-mode constant; confirm against the
        # cv2 version in use.
        cv2.namedWindow(self.windowNameOrig, 2)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(self.topic, Image, self.callback)
项目:PyIntroduction    作者:tody411    | 项目源码 | 文件源码
def cvShowImageColor(image_file):
    """Load *image_file* in colour (BGR) and display it in a resizable
    window until any key is pressed.
    """
    bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Display a grayscale image with OpenCV.
项目:PyIntroduction    作者:tody411    | 项目源码 | 文件源码
def cvShowImageGray(image_file):
    """Load *image_file* as grayscale and display it in a resizable window
    until any key is pressed.
    """
    gray = cv2.imread(image_file, 0)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Display an image with Matplotlib.
项目:PyIntroduction    作者:tody411    | 项目源码 | 文件源码
def drawingDemo():
    """Demonstrate the drawing helpers: line, circle, rectangle, ellipse,
    polyline and text, rendered onto an empty canvas and shown until a key
    is pressed.
    """
    canvas = emptyImage()

    # Straight line, thickness 2.
    drawLine(canvas, (10, 10), (200, 200), (0, 0, 255), 2)

    # Thickness -1 means "filled" in OpenCV.
    drawCircle(canvas, (300, 100), 80, (0, 255, 0), -1)

    # Filled rectangle with a 3 px outline drawn on top of it.
    drawRectangle(canvas, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(canvas, (10, 210), (210, 350), (255, 0, 0), 3)

    # Filled ellipse (full 0-360 degree sweep, no rotation).
    drawElipse(canvas, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)

    # Closed polyline through six points.
    pts = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(canvas, pts, True, (255, 100, 100), 5)

    # Text label in the first available font.
    drawText(canvas, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
项目:lan-ichat    作者:Forec    | 项目源码 | 文件源码
def run(self):
        """Receive-and-display loop: connect to the remote peer (retrying
        until it is reachable), wait for the "S" handshake byte, then read
        length-prefixed, zlib-compressed, pickled frames and show them in a
        'Remote' window until Esc is pressed.
        """
        print ("VEDIO server starts...")
        # Retry the connection every 3 s until it succeeds.
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                time.sleep(3)
                continue
        print ("video server <-> remote server success connected...")
        check = "F"  # default; immediately overwritten by the handshake byte
        check = self.sock.recv(1)
        # The peer must answer "S" before any frames are sent.
        if check.decode("utf-8") != "S":
            return
        data = "".encode("utf-8")  # rolling receive buffer (bytes)
        # Native-endian unsigned long; must match the sender's struct.pack("L").
        # NOTE(review): native "L" is platform-dependent (4 vs 8 bytes).
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate bytes until the fixed-size length header is complete.
            while len(data) < payload_size:
                data += self.sock.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Keep reading until the whole compressed frame has arrived.
            while len(data) < msg_size:
                data += self.sock.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            # SECURITY NOTE(review): pickle.loads on data from the network can
            # execute arbitrary code if the peer is malicious -- trusted peers only.
            frame = pickle.loads(frame_data)
            try:
                cv2.imshow('Remote', frame)
            except:
                pass  # best-effort display; skip frames that fail to render
            # Esc (27) exits the display loop.
            if cv2.waitKey(1) & 0xFF == 27:
                break
项目:lan-ichat    作者:Forec    | 项目源码 | 文件源码
def run(self):
        """Capture-and-send loop: connect to the remote server (retrying until
        reachable), wait for the "S" handshake byte, then stream frames from a
        local video file as length-prefixed, zlib-compressed pickles, with an
        optional local preview in a 'You' window.
        """
        print ("VEDIO client starts...")
        # Retry the connection every 3 s until the server accepts.
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        print ("video client <-> remote server success connected...")
        check = "F"  # default; immediately overwritten by the handshake byte
        check = self.sock.recv(1)
        # The server must answer "S" before streaming starts.
        if check.decode("utf-8") != "S":
            return
        print ("receive authend")
        #self.cap = cv2.VideoCapture(0)
        # NOTE(review): hard-coded test clip instead of the webcam line above.
        self.cap = cv2.VideoCapture("test.mp4")
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print ("remote VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()  # NOTE(review): ret is never checked
            if self.showme:
                cv2.imshow('You', frame)
                # Esc closes only the local preview; streaming continues.
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            # Downscale before sending; fy=self.fx gives uniform scaling.
            if self.level > 0:
                frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(frame)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                # Native "L" length prefix must match the receiver's
                # struct.calcsize("L") (platform-dependent size).
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
                print("video send ", len(zdata))
            except:
                break  # connection lost; stop streaming
            # Skip self.interval frames to throttle the effective frame rate.
            for i in range(self.interval):
                self.cap.read()
项目:lan-ichat    作者:Forec    | 项目源码 | 文件源码
def run(self):
        """Streaming loop for an already-opened capture: connect to the remote
        server (retrying until reachable), then send each downscaled frame as
        a length-prefixed, zlib-compressed pickle, with an optional local
        preview in a 'You' window.
        """
        # Retry the connection every 3 s until the server accepts.
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print("VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()  # NOTE(review): ret is never checked
            if self.showme:
                cv2.imshow('You', frame)
                # Esc closes only the local preview; streaming continues.
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            # Downscale before sending; fy=self.fx gives uniform scaling.
            sframe = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(sframe)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                # Native "L" length prefix must match the receiver's
                # struct.calcsize("L") (platform-dependent size).
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            except:
                break  # connection lost; stop streaming
            # Skip self.interval frames to throttle the effective frame rate.
            for i in range(self.interval):
                self.cap.read()
项目:piwall-cvtools    作者:infinnovation    | 项目源码 | 文件源码
def __init__(self, video_src = 0, interactive = True, video = 'vss.avi', fallback = 'synth:bg=./data/hi.jpg:noise=0.05', save_frames = False, frame_dir = './data', frame_prefix='frame-'):
        """Grab frames from *video_src*, detect squares in each frame and
        either display the annotated frames interactively (quit with 'q') or
        write them to the *video* file, optionally also saving each frame as
        a JPEG under *frame_dir*.
        """
        cam = create_capture(video_src, fallback=fallback)
        vwriter = None
        if interactive:
            print("q to quit")
            window = 'video square search'
            cv2.namedWindow(window, cv2.WINDOW_NORMAL)
        else:
            # Non-interactive runs record the annotated frames to a video file.
            vwriter = VideoWriter(video)
        run = True
        t = clock()  # start time for the timing overlay
        frameCounter = 0
        while run:
            ret, img = cam.read()  # NOTE(review): ret is never checked
            if interactive:
                print("read next image")
            # 0.2 is the tolerance argument forwarded to the square detector.
            squares = find_squares(img, 0.2)
            nOfSquares = len(squares)
            cv2.drawContours( img, squares, -1, (0, 255, 0), 3 )
            # Elapsed time since __init__ started -- cumulative, not per-frame
            # (t is never reset inside the loop).
            dt = clock() - t
            draw_str(img, (80, 80), 'time: %.1f ms found = %d' % (dt*1000, nOfSquares), 2.0)
            if interactive:
                cv2.imshow(window, img)    
                print('q to quit, n for next')
                ch = 0xff & cv2.waitKey(100) 
                if ch == ord('q'):
                    run = False
                elif ch == ord('n'):
                    # NOTE(review): effectively a no-op -- the loop advances anyway.
                    continue
            else:
                vwriter.addFrame(img)
                if save_frames:
                    # NOTE(review): frame_prefix already ends in '-', so this
                    # produces names like 'frame--0000.jpg' -- confirm intended.
                    fn = os.path.join(frame_dir, '%s-%04d.%s' % (frame_prefix, frameCounter, 'jpg'))
                    print("Saving %d frame to %s" % (frameCounter, fn))
                    cv2.imwrite(fn, img)
                    frameCounter+=1
        if vwriter:
            vwriter.finalise()