Python cv2 module: setMouseCallback() example source code

The following 50 code examples, extracted from open-source Python projects, show how to use cv2.setMouseCallback().

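Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: create a named window, register a callback with the signature callback(event, x, y, flags, param), and keep the window alive with a cv2.waitKey() loop. The window name "demo", the on_mouse function, and the canvas image are illustrative placeholders, not taken from any of the projects listed below.

import cv2
import numpy as np

def on_mouse(event, x, y, flags, param):
    # param is the user data passed as the third argument to setMouseCallback
    canvas = param
    if event == cv2.EVENT_LBUTTONDOWN:
        # draw a small filled circle at every left click
        cv2.circle(canvas, (x, y), 3, (0, 255, 0), -1)

def main():
    canvas = np.zeros((480, 640, 3), np.uint8)
    cv2.namedWindow("demo")
    cv2.setMouseCallback("demo", on_mouse, canvas)
    while True:
        cv2.imshow("demo", canvas)
        if (cv2.waitKey(20) & 0xFF) in (27, ord('q')):  # Esc or 'q' quits
            break
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()

The callback is invoked for every mouse event in that window; the examples below differ mainly in what they do inside it (drawing ROIs, collecting clicked points, starting trackers, and so on).
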
Project: camera_calibration_frontend    Author: groundmelon    | Project source | File source
def run(self):
        cv2.namedWindow("display", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
        cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)

        if self.extra_queue:
            cv2.namedWindow("extra", cv2.WINDOW_NORMAL)

        while True:
            # wait for an image (could happen at the very beginning when the queue is still empty)
            while len(self.queue) == 0:
                time.sleep(0.1)
            im = self.queue[0]
            cv2.imshow("display", im)
            k = cv2.waitKey(6) & 0xFF
            if k in [27, ord('q')]:
                rospy.signal_shutdown('Quit')
            elif k == ord('s'):
                self.opencv_calibration_node.screendump(im)
            if self.extra_queue:
                if len(self.extra_queue):
                    extra_img = self.extra_queue[0]
                    cv2.imshow("extra", extra_img)
Project: MultiObjectTracker    Author: alokwhitewolf    | Project source | File source
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name,cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print " Drag across the screen to set lines.\n Do it twice"
    print " After drawing the lines press 'r' to resume\n"

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)

    list = [l1,l2]

    mouse_down = False
    def callback(event, x, y, flags, param):
        global trigger, mouse_down

        if trigger<2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)

            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x,y)
                cv2.line(im_disp, (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1
        else:
            pass
    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name,im_disp)
        key = cv2.waitKey(10) & 0xFF

        if key == ord('r'):
            # Press 'r' to resume: return the two drawn lines
            return list
Project: Rapidly-Exploring-Random-Tree-Star-Scan    Author: vampcoder    | Project source | File source
def getSourceAndGoal(self):
            cv2.namedWindow('image')
            cv2.setMouseCallback('image', self.draw_circle)
            cv2.imshow('image', img)
            cv2.waitKey(0)
Project: video_labeler    Author: hahnyuan    | Project source | File source
def __init__(self,labels,video_file,box_saver,border=30):
        """
        the GUI Labeler
        :param labels: the labels name string list
        :param video_file: the video file path
        :param border: the border of the center clip field (white line around the video)
        :param save_dir: label result save path
        :param save_im: if write every cropped image to each label directory
        """
        self.cam = cv2.VideoCapture(video_file)
        self.video_stat = VideoStat(border)
        self.label_stat = LabelStat(labels)
        self.labels=labels
        self.box_saver=box_saver
        cv2.setMouseCallback("video", self.video_click)
        cv2.setMouseCallback("label", self.label_click)
        self.run()
Project: nelpy    Author: nelpy    | Project source | File source
def pick_corrs(images, n_pts_to_pick=4):
    data = [ [[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]
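    # per-image state: d[0] holds the points picked so far, d[5] the image, d[6] the window name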

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data]
Project: football-stats    Author: dev-labs-bg    | Project source | File source
def getPerpectiveCoordinates(frame, windowName, mouse):
    cv2.imshow(windowName, frame)   
    cv2.setMouseCallback(windowName, mouse.leftClick)

    i = 0
    coords = []

    while i < 4:
        i += 1
        mouse.x = 0
        mouse.y = 0
        mouse.leftClicked = False

        while mouse.leftClicked == False:
            # wait for key press
            key = cv2.waitKey(1) & 0xFF

        coords.append((mouse.x, mouse.y))

    return coords

# debug coordinates by moving the mouse
Project: CameraTablet    Author: dmvlasenk    | Project source | File source
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows()
Project: CameraTablet    Author: dmvlasenk    | Project source | File source
def setFingerTemplate(big_image, name_template_file):
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows()
Project: pybot    Author: spillai    | Project source | File source
def mouse_event_create(win_name, cb): 
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, cb)
Project: PyIntroduction    Author: tody411    | Project source | File source
def setCallBack(self, win_name):
        cv2.setMouseCallback(win_name, self._callBack)
Project: cv-lane    Author: kendricktan    | Project source | File source
def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run)
Project: ATX    Author: NetEaseGame    | Project source | File source
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    # load the image, clone it, and setup the mouse callback function
    image = cv2.imread(args["image"])
    clone = image.copy()
    images = [clone, image]
    ref_pt = [None, None]

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", make_mouse_callback(images, ref_pt))

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", images[1])
        key = cv2.waitKey(1) & 0xFF

        # if the 'c' key is pressed, break from the loop
        if key == ord("c"):
            if ref_pt[1] is None:
                continue
            roi = clone[ref_pt[0][1]:ref_pt[1][1], ref_pt[0][0]:ref_pt[1][0]]
            interactive_save(roi)
        elif key == ord("q"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it

    # close all open windows
    cv2.destroyAllWindows()
Project: AutomatorX    Author: xiaoyaojjian    | Project source | File source
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    # load the image, clone it, and setup the mouse callback function
    image = cv2.imread(args["image"])
    clone = image.copy()
    images = [clone, image]
    ref_pt = [None, None]

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", make_mouse_callback(images, ref_pt))

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", images[1])
        key = cv2.waitKey(1) & 0xFF

        # if the 'c' key is pressed, break from the loop
        if key == ord("c"):
            if ref_pt[1] is None:
                continue
            roi = clone[ref_pt[0][1]:ref_pt[1][1], ref_pt[0][0]:ref_pt[1][0]]
            interactive_save(roi)
        elif key == ord("q"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it

    # close all open windows
    cv2.destroyAllWindows()
Project: lqRRT    Author: jnez71    | Project source | File source
def __init__(self, height, width, image_path):
        self.height, self.width = height, width

        self.clear_screen()

        if image_path:
            self.make_image(image_path)

        cv2.namedWindow('Draw OccupancyGrid')
        cv2.setMouseCallback('Draw OccupancyGrid', self.do_draw)
        self.drawing = 0
Project: DrosophilaCooperative    Author: avaccari    | Project source | File source
def selectArea(self):
        self.userInteraction = True
        cv2.namedWindow(self.selectionWindow)
        cv2.setMouseCallback(self.selectionWindow, self.mouseInteraction)
        self.workingFrame = self.processedFrame.copy()
        self.showFrame(self.selectionWindow, self.workingFrame)

        while True:
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                self.undoFrames = []
                break
            elif key == ord('c'):
                self.workingFrame = self.processedFrame.copy()
                self.trackedAreasList = []
                self.undoFrames = []
                self.showFrame(self.selectionWindow, self.workingFrame)
            elif key == ord('l'):
                try:
                    self.trackedAreasList.pop()
                except IndexError:
                    pass
                else:
                    self.workingFrame = self.undoFrames.pop()
                    self.showFrame(self.selectionWindow, self.workingFrame)
            elif key == ord('t'):
                self.undoFrames = []
                self.trackArea = self.refPt
                self.tracking = True
                self.trackDump = []
                if self.pause is True:
                    self.pause = False
                break
            elif key == ord('h'):
                self.showHelp('select')

        cv2.destroyWindow(self.selectionWindow)
        self.userInteraction = False
Project: inpainting    Author: kvfrans    | Project source | File source
def __init__(self, windowname, dests, colors_func):
            self.prev_pt = None
            self.windowname = windowname
            self.dests = dests
            self.colors_func = colors_func
            self.dirty = False
            self.show()
            cv2.setMouseCallback(self.windowname, self.on_mouse)
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def _find_power_plug_thing(self):
        """ Find the power plug at the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([100, 40, 0], dtype = "uint8")
        upper = np.array([255, 255, 20], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))

        if len(sorted_contours) > 0:
            plug = self._find_a_thing(sorted_contours[0], 0, 0.06, 0, 0.06, 99.0)

            if plug is not None:
                plug.set_power_plug()
                self.things.append(plug)
                self.power_plug = plug

                if self.debug:
                    debug_img = self.img.copy()
                    for c in sorted_contours:
                        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
                    cv2.imshow("plug picture", debug_img)
                    cv2.setMouseCallback("plug picture", self.handle_mouse)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def process_cloud(self):
        self.zarj.transform.init_transforms()
        self.cloud = self.zarj.eyes.get_stereo_cloud()
        self.data = self.zarj.eyes.get_cloud_data(self.cloud)
        self.raw_img, self.details = self.zarj.eyes.get_cloud_image_with_details(self.cloud)
        print "<<<< FOCUS IMAGE WINDOW AND PRESS ANY KEY TO CONTINUE >>>>"
        cv2.imshow("cloud view", raw_img)
        cv2.setMouseCallback("cloud view", handle_mouse)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Project: python-opencv2    Author: bunkahle    | Project source | File source
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
Project: python-opencv2    Author: bunkahle    | Project source | File source
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
Project: Interactive-object-tracking    Author: abhishekarya286    | Project source | File source
def label(image) :
    '''This function along with click_and_crop() helps in labelling object and background.
       Input : Input image
       Output: selected region of interest(either obj or distractor)'''
    global refPt,cropping,img_copy,clone,list_refpt
    #image1=copy.deepcopy(image)
    #clone = image1.copy()
    #img_copy=image1.copy()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    print "Label the object"
    print "After making a bounding box, press 'c' "
    print "if you wish to select the object again, press 'r' "

    while True:
    # display the image and wait for a keypress
        cv2.imshow("image", img_copy)
        key = cv2.waitKey(1) & 0xFF

        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            image = clone.copy()
            img_copy=image.copy()

        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it
    if len(refPt) == 2:
        roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        cv2.imshow("ROI", roi)
        print "press any key"
        cv2.waitKey(0)

    cv2.destroyAllWindows() # close all open windows
    obj_img=roi  # roi containing the object
    list_refpt.append(refPt)
    print list_refpt,"check_list"
    return obj_img
Project: Interactive-object-tracking    Author: abhishekarya286    | Project source | File source
def label(image) :
    '''This function along with click_and_crop() helps in labelling object and background.
       Input : Input image
       Output: selected region of interest(either obj or distractor)'''
    global refPt,cropping,img_copy,clone,list_refpt
    #image1=copy.deepcopy(image)
    #clone = image1.copy()
    #img_copy=image1.copy()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    print "Label the object"
    print "After making a bounding box, press 'c' "
    print "if you wish to select the object again, press 'r' "

    while True:
    # display the image and wait for a keypress
        cv2.imshow("image", img_copy)
        key = cv2.waitKey(1) & 0xFF

        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            image = clone.copy()
            img_copy=image.copy()

        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break

    # if there are two reference points, then crop the region of interest
    # from the image and display it
    if len(refPt) == 2:
        roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        cv2.imshow("ROI", roi)
        print "press any key"
        cv2.waitKey(0)

    cv2.destroyAllWindows() # close all open windows
    obj_img=roi  # roi containing the object
    list_refpt.append(refPt)
    return obj_img
Project: emojivis    Author: JustinShenk    | Project source | File source
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
Project: emojivis    Author: JustinShenk    | Project source | File source
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
Project: ATLeS    Author: liffiton    | Project source | File source
def __init__(self, tx1, tx2, ty1, ty2):
        # For mouse interaction
        self._mouse_on = False
        cv2.namedWindow("preview")
        atexit.register(cv2.destroyAllWindows)
        cv2.setMouseCallback("preview", self.mouse_callback)
        # Setup tank bounds
        self._tx1 = tx1
        self._tx2 = tx2
        self._ty1 = ty1
        self._ty2 = ty2
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | Project source | File source
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()
        self.depth_image = None
        self.rgb_image = None
        self.count = 0
        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | Project source | File source
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)

        # self.pub = rospy.Publisher('chatter', String, queue_size=10)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()

        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials    Author: taochenshh    | Project source | File source
def __init__(self):

        self.br = CvBridge()

        # If you subscribe /camera/depth_registered/hw_registered/image_rect topic, the depth image and rgb image are 
        # already registered. So you don't need to call register_depth_to_rgb()
        self.depth_image_sub = rospy.Subscriber("/camera/depth_registered/hw_registered/image_rect",Image,self.depth_callback)
        self.rgb_image_sub = rospy.Subscriber("/camera/rgb/image_rect_color",Image,self.rgb_callback)

        # self.pub = rospy.Publisher('chatter', String, queue_size=10)
        self.ir_img = None
        self.rgb_img = None

        self.rgb_rmat = None
        self.rgb_tvec = None
        self.ir_rmat = None
        self.ir_tvec = None

        self.ir_to_rgb_rmat = None
        self.ir_to_rgb_tvec = None
        self.depth_image = None
        self.rgb_image = None
        self.ir_to_world_tvec = None
        self.ir_to_rgb_rmat = None
        self.load_extrinsics()
        self.load_intrinsics()

        self.drawing = False # true if mouse is pressed
        self.rect_done = False
        self.ix1 = -1
        self.iy1 = -1
        self.ix2 = -1
        self.iy2 = -1

        cv2.namedWindow('RGB Image')
        cv2.setMouseCallback('RGB Image',self.draw_rect)
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def klick_landmarks_on_image():
    global current_landmark, klicked_landmarks

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)


    show_lms_on_image()
    image     = cv2.imread('/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.JPG')
    for lm_idx in range(68):
        while True:
            temp_image = image.copy()
            lms_to_be_shown = klicked_landmarks#+current_landmark
            if len(current_landmark)>0:
                lms_to_be_shown =klicked_landmarks + [current_landmark]

            if len(lms_to_be_shown)>0:
                draw_lms_on_image(temp_image, lms_to_be_shown)

            cv2.imshow("image", temp_image)
            key = cv2.waitKey(1) & 0xFF

            if key == ord(" "):
                if len(current_landmark)>0:
                    klicked_landmarks.append(current_landmark)
                    break
            if key == ord("q"):
                return 0
        current_landmark=[]
    cv2.destroyWindow("image")

    #now write lm file 
    landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
    with open(landmark_file, "w") as lf:
        lf.write('version: 1\n')
        lf.write('n_points: 68\n')
        lf.write('{\n')
        for landmark in klicked_landmarks:
            lf.write(str(landmark[0])+" "+str(landmark[1])+"\n")
        lf.write('}\n')
Project: thesis_scripts    Author: PhilippKopp    | Project source | File source
def main():
    all_bb = []
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)
    images = glob.glob('/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/*/frames/000001.png')
    output_file_path = '/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/bb_clicked_philipp.log'
    for i, image_path in enumerate(images):
        print ('image',image_path,'(',i,'of',len(images),')')
        image     = cv2.imread(image_path)
        upper_left_point, lower_right_point = click_bb_on_image(image)
        all_bb.append([upper_left_point[0], upper_left_point[1], lower_right_point[0], lower_right_point[1]])
        #print (upper_left_point, lower_right_point)
        open(output_file_path, 'a').write(str(image_path)+' '+str(upper_left_point[0])+' '+str(upper_left_point[1])+' '+str(lower_right_point[0])+' '+str(lower_right_point[1])+'\n')
    cv2.destroyWindow("image")





    #now write lm file 
#   landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
#   with open(landmark_file, "w") as lf:
#       lf.write('version: 1\n')
#       lf.write('n_points: 68\n')
#       lf.write('{\n')
#       for landmark in klicked_landmarks:
#           lf.write(str(landmark[0])+" "+str(landmark[1])+"\n")
#       lf.write('}\n')
#   return x, y, w, h
Project: pynephoscope    Author: neXyon    | Project source | File source
def __init__(self):
        if len(sys.argv) < 2:
            print('Usage: calibration <directory> [<filename>]')
            print('The supplied directory should contain the calibration images.')
            sys.exit(1)

        size = 640

        self.path = sys.argv[1]
        self.image_window = 'Image Calibration'
        self.sky_window = 'Sky Calibration'
        self.tb_image_switch = 'image'
        self.tb_max_mag = 'maximum magnitude'
        self.save_file_name = 'data'
        self.selected_star = None
        self.selected_color = (0, 0, 255)
        self.marked_color = (0, 255, 0)
        self.circle_radius = 5
        self.max_mag = 4
        self.renderer = SkyRenderer(size)

        try:
            self.calibrator = Calibrator(SkyCameraFile.glob(self.path), EarthLocation(lat=Configuration.latitude, lon=Configuration.longitude, height=Configuration.elevation))
        except Exception as e:
            print(e.message)
            sys.exit(2)

        if len(sys.argv) > 2:
            self.save_file_name = sys.argv[2]
            if os.path.exists(self.save_file_name):
                self.calibrator.load(self.save_file_name)

        cv2.namedWindow(self.image_window, cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow(self.sky_window, cv2.WINDOW_AUTOSIZE)

        self.selectImage(0)

        cv2.setMouseCallback(self.image_window, self.imageMouseCallback)
        cv2.setMouseCallback(self.sky_window, self.skyMouseCallback)
        cv2.createTrackbar(self.tb_image_switch, self.image_window, 0, len(self.calibrator.files) - 1, self.selectImage)
        cv2.createTrackbar(self.tb_max_mag, self.sky_window, self.max_mag, 6, self.setMaxMag)
Project: Artificial-Intelligence-with-Python    Author: PacktPublishing    | Project source | File source
def __init__(self, scaling_factor=0.5):
        # Initialize the video capture object
        self.cap = cv2.VideoCapture(0)

        # Capture the frame from the webcam
        _, self.frame = self.cap.read()

        # Scaling factor for the captured frame
        self.scaling_factor = scaling_factor

        # Resize the frame
        self.frame = cv2.resize(self.frame, None, 
                fx=self.scaling_factor, fy=self.scaling_factor, 
                interpolation=cv2.INTER_AREA)

        # Create a window to display the frame
        cv2.namedWindow('Object Tracker')

        # Set the mouse callback function to track the mouse
        cv2.setMouseCallback('Object Tracker', self.mouse_event)

        # Initialize variable related to rectangular region selection
        self.selection = None

        # Initialize variable related to starting position 
        self.drag_start = None

        # Initialize variable related to the state of tracking 
        self.tracking_state = 0

    # Define a method to track the mouse events
Project: GoogleVideo    Author: bw4sz    | Project source | File source
def Crop(img,title):


    def onmouse(event,x,y,flags,param):
        global ix,iy,roi,drawing        

        # Draw Rectangle
        if event == cv2.EVENT_RBUTTONDOWN:
            drawing = True
            ix,iy = x,y

        elif event == cv2.EVENT_MOUSEMOVE:
            if drawing == True:
                cv2.rectangle(img,(ix,iy),(x,y),BLUE,-1)
                rect = (ix,iy,abs(ix-x),abs(iy-y))

        elif event == cv2.EVENT_RBUTTONUP:
            drawing = False
            cv2.rectangle(img,(ix,iy),(x,y),BLUE,-1)
            rect = (ix,iy,x,y)
            roi.extend(rect)

    cv2.namedWindow(title,cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(title,onmouse)

    print ("Right click and hold to draw a single rectangle ROI, beginning at the top left corner of the desired area. A blue box should appear. Hit esc to exit screen. Window can be resized by selecting borders.")
    while True:
            cv2.namedWindow(title,cv2.WINDOW_NORMAL)                 
            cv2.imshow(title,img)
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                    break

    cv2.destroyAllWindows()

    print(roi)
    return(roi)
Project: GoogleVideo    Author: bw4sz    | Project source | File source
def Crop(img,title):


    def onmouse(event,x,y,flags,param):
        global ix,iy,roi,drawing        

        # Draw Rectangle
        if event == cv2.EVENT_RBUTTONDOWN:
            drawing = True
            ix,iy = x,y

        elif event == cv2.EVENT_MOUSEMOVE:
            if drawing == True:
                cv2.rectangle(img,(ix,iy),(x,y),BLUE,-1)
                rect = (ix,iy,abs(ix-x),abs(iy-y))

        elif event == cv2.EVENT_RBUTTONUP:
            drawing = False
            cv2.rectangle(img,(ix,iy),(x,y),BLUE,-1)
            rect = (ix,iy,x,y)
            roi.extend(rect)

    cv2.namedWindow(title,cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(title,onmouse)

    print ("Right click and hold to draw a single rectangle ROI, beginning at the top left corner of the desired area. A blue box should appear. Hit esc to exit screen. Window can be resized by selecting borders.")
    while True:
            cv2.namedWindow(title,cv2.WINDOW_NORMAL)                 
            cv2.imshow(title,img)
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                    break

    cv2.destroyAllWindows()

    print(roi)
    return(roi)
Project: OpenCV-Snapchat-DogFilter    Author: sguduguntla    | Project source | File source
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
Project: OpenCV-Snapchat-DogFilter    Author: sguduguntla    | Project source | File source
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
Project: CameraTablet    Author: dmvlasenk    | Project source | File source
def setCorners(self):
        name_window = 'Set Corners'
        cv2.namedWindow(name_window)
        cv2.setMouseCallback(name_window, saveTabletCorners)
        cap = cv2.VideoCapture(0)
        ret, frame_from = cap.read()
        TTablet.m_CornersX = []
        TTablet.m_CornersY = []
        TTablet.m_AddFrame = np.zeros(frame_from.shape, np.uint8) 
        #print ("start setCorners")

        while(cap.isOpened()):
            ret, frame_from = cap.read()
            frame_from  = cv2.flip(frame_from, -1)
            frame = cv2.add(TTablet.m_AddFrame, frame_from)
            if ret==True:
                cv2.imshow(name_window,frame)
                #print ("fasdfasdf")
                if cv2.waitKey(1) & (len(TTablet.m_CornersX) > 3):
                    break
            else:
                break
        # Release everything if job is finished
        cap.release()
        #out.release()
        cv2.destroyAllWindows()
Project: CVMazeRunner    Author: M-Niedoba    | Project source | File source
def get_user_selected_point(image):
    global point
    point = (-1,-1)
    cv2.setMouseCallback(MAZE_NAME,get_mouse_point)
    print("Press any key once you have selected your point")
    while point == (-1,-1):
        cv2.waitKey(0)
        if(point == (-1,-1)):
            print("Invalid pont, please try again")
    return point[0],point[1]
Project: memegenerator    Author: Huxwell    | Project source | File source
def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        self.colors_func = colors_func
        self.dirty = False
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
Project: memegenerator    Author: Huxwell    | Project source | File source
def __init__(self, win, callback):
        self.win = win
        self.callback = callback
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None
        self.drag_rect = None
Project: Rapidly-Exploring-Random-Tree-Star-Scan    Author: vampcoder    | Project source | File source
def getSourceAndGoal(self):
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.draw_circle)
        cv2.imshow('image', img)
        cv2.waitKey(0)
Project: Rapidly-Exploring-Random-Tree-Star-Scan    Author: vampcoder    | Project source | File source
def getSourceAndGoal(self):
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.draw_circle)
        cv2.imshow('image', img)
        cv2.waitKey(0)
Project: Rapidly-Exploring-Random-Tree-Star-Scan    Author: vampcoder    | Project source | File source
def getSourceAndGoal(self):
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.draw_circle)
        cv2.imshow('image', img)
        cv2.waitKey(0)
Project: Rapidly-Exploring-Random-Tree-Star-Scan    Author: vampcoder    | Project source | File source
def getSourceAndGoal(self):
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.draw_circle)
        cv2.imshow('image', img)
        cv2.waitKey(0)
Project: pytomatic    Author: N0K0    | Project source | File source
def main():
    img = None
    main_win = Windows_handler.WinHandler(title='Nox',class_name='Qt5QWindowIcon')
    main_box = main_win.get_bbox()
    px_handler = Pixel_handler.PixelSearch(win_handler=main_win)
    mouse = Mouse_handler.MouseMovement(window_handler=main_win)
    main_win.init_window()
    cv2.namedWindow('image_name')
    cv2.namedWindow('config')

    while True:

        img = px_handler.grab_window(bbox=main_box)
        img = px_handler.img_to_numpy(img,compound=False)
        img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)

        orb = cv2.ORB_create()
        kp = orb.detect(img, None)
        kp, des = orb.compute(img, kp)
        img2 = cv2.drawKeypoints(img, kp, None)

        cv2.imshow('image_name',img2)
        cv2.setMouseCallback('image_name', mouse_event, param=img)


        k = cv2.waitKey(1)
        if k == ord('q'):  # press 'q' to exit
            cv2.destroyAllWindows()
            quit(0)
Project: ConditionalGAN    Author: seungjooli    | Project source | File source
def test(m):
    class DrawingState:
        def __init__(self):
            self.x_prev = 0
            self.y_prev = 0
            self.drawing = False
            self.update = True

    def interactive_drawing(event, x, y, flags, param):
        image = param[0]
        state = param[1]
        if event == cv2.EVENT_LBUTTONDOWN:
            state.drawing = True
            state.x_prev, state.y_prev = x, y
        elif event == cv2.EVENT_MOUSEMOVE:
            if state.drawing:
                cv2.line(image, (state.x_prev, state.y_prev), (x, y), (1, 1, 1), 1)
                state.x_prev = x
                state.y_prev = y
                state.update = True
        elif event == cv2.EVENT_LBUTTONUP:
            state.drawing = False
        elif event == cv2.EVENT_RBUTTONDOWN:
            image.fill(0)
            state.update = True

    cv2.namedWindow('Canvas')
    image_input = np.zeros((FLAGS.input_height, FLAGS.input_width, 3), np.float32)
    state = DrawingState()
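    # the list passed here is forwarded to interactive_drawing as its param argument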
    cv2.setMouseCallback('Canvas', interactive_drawing, [image_input, state])
    while cv2.getWindowProperty('Canvas', 0) >= 0:
        if state.update:
            reshaped_image_input = np.array([image_input])
            image_output = m.test(reshaped_image_input)
            concatenated = np.concatenate((image_input, image_output[0]), axis=1)
            color_converted = cv2.cvtColor(concatenated, cv2.COLOR_RGB2BGR)
            cv2.imshow('Canvas', color_converted)
            state.update = False

        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # esc
            break
    cv2.destroyAllWindows()
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def main(wall_file = None):
    global image, draw_tile, clone
    image = np.zeros((600, 600, 3), dtype = "uint8")
    clone = image.copy()

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", mouse_callback)

    if wall_file:
        _load_wall(wall_file)

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", image)
        key = cv2.waitKey(1) & 0xFF

        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            image = clone.copy()

        elif key == ord("g"):
            print("savig the .piwall file")
            target = open(generated_piwall, 'w')
            dotpiwall = DotPiwall("test", wall)
            #print(str(dotpiwall))
            target.write(str(dotpiwall))
            target.close()

        elif key == ord("d"):
            draw_tile = not draw_tile

        elif key == ord("h"):
            help_popup()

        elif key == ord("s"):
            save_wall_popup()

        elif key == ord("l"):
            load_wall_popup()

        elif key == ord("w"):
            print(wall)
            print('gui tiles %d' % len(tiles))

            # if the 'c' key is pressed, break from the loop
        elif key == ord("c") or key == ord("q"):
            break

        # if there are two reference points, then crop the region of interest
        # from the image and display it
    if len(refPt) == 2:
        roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
        cv2.imshow("ROI", roi)
        cv2.waitKey(0)

    # close all open windows
    cv2.destroyAllWindows()
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def _find_array_button_thing(self):
        """ Find the array button on the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([0, 0, 60], dtype = "uint8")
        upper = np.array([20, 20, 255], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        debug_img = None
        if self.debug:
            debug_img = self.img.copy()

        button_box = None
        for c in contours:
            box = cv2.boundingRect(c)

            if button_box is None:
                button_box = box
            else:
                button_box = self._union_box(deepcopy(button_box), box)

        if button_box is None:
            return

        top,bottom,left,right,center = self.find_dimensions(np.int0(np.array(self._bound_to_boxpoints(button_box))))
        if top is None or left is None or center is None:
            return None

        height = self.find_distance(top, bottom)
        width = self.find_distance(left, right)

        if self.debug:
            for c in contours:
                cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)

            cv2.circle(debug_img, top, 5, (255, 255, 0))
            cv2.circle(debug_img, bottom, 5, (255, 255, 0))
            cv2.circle(debug_img, left, 5, (255, 255, 0))
            cv2.circle(debug_img, right, 5, (255, 255, 0))
            cv2.rectangle(debug_img, (button_box[0], button_box[1]),
                    (button_box[0] + button_box[2], button_box[1] + button_box[3]), (128, 0, 128), 2)
            #cv2.circle(debug_img, center, 5, (255, 255, 0))

            cv2.imshow("button picture", debug_img)
            cv2.setMouseCallback("button picture", self.handle_mouse)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        self.array_button = Thing(height, width, center, None)
        self.array_button.set_array_button()
        self.array_button.computed_center = self.compute_center(left, right, top, bottom)
        self.things.append(self.array_button)
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def _find_choke_thing(self):
        """ Find the choke at the solar array box """

        """ This uses color to determine if we have a choke """
        lower = np.array([128, 0, 0], dtype = "uint8")
        upper = np.array([255, 10, 20], dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)

        blurred = cv2.GaussianBlur(mask, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        # Note: your view of the choke can be cut off, so you may see an end as 2 contours.
        #  In a dream world, we would combine two small contours close together.
        #  But for now, we just operate with the two biggest contours
        sorted_contours = sorted(contours, cmp=lambda a,b: int(cv2.contourArea(b)) - int(cv2.contourArea(a)))

        if len(sorted_contours) >= 2:
            thing1 = self._find_a_thing(sorted_contours[0], 0, 0.1, 0, 0.1, 99.0)
            thing2 = self._find_a_thing(sorted_contours[1], 0, 0.1, 0, 0.1, 99.0)

            if thing1 is not None and thing2 is not None:
                print "We have a choke!"
                box1 = cv2.boundingRect(sorted_contours[0])
                box2 = cv2.boundingRect(sorted_contours[1])
                if box1[0] > box2[0]:
                    inner = thing1
                    outer = thing2
                else:
                    inner = thing2
                    outer = thing1

                inner.set_choke_inner()
                self.things.append(inner)
                self.choke_inner = inner
                outer.set_choke_outer()
                self.things.append(outer)
                self.choke_outer = outer

                if self.debug:
                    debug_img = self.img.copy()
                    for c in sorted_contours:
                        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
                    cv2.imshow("choke picture", debug_img)
                    cv2.setMouseCallback("choke picture", self.handle_mouse)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
Project: srcsim2017    Author: ZarjRobotics    | Project source | File source
def _find_habitat_things(self):
        """ Find interesting objects in the habitat world """
        self._find_repair_button_thing()

        # Start by grey scale, blur, and thresholding
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

        # find contours in the thresholded image
        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = contours[0] if is_cv2() else contours[1]

        debug_img = self.img.copy()

        for c in contours:
            thing = self._find_a_thing(c, self.MINIMUM_TOOL_LENGTH, self.MAXIMUM_TOOL_LENGTH,
                                     self.MINIMUM_TOOL_WIDTH, self.MAXIMUM_TOOL_WIDTH,
                                     self.TOOL_MUST_BE_WITHIN, debug_img)
            if thing is None:
                continue

            x,y,w,h = cv2.boundingRect(c)
            detector_center, up = self.is_detector(self.img[y:y+h,x:x+w].copy())

            if detector_center is not None:
                adjusted_detector = (int(detector_center[0] + x), int(detector_center[1] + y))
                thing.set_detector(adjusted_detector, up)
                self.things.append(thing)
                self.leak_detector = thing

            repair_center, up = self.is_repair_tool(self.img[y:y+h,x:x+w].copy())
            if repair_center is not None:
                adjusted_repair = (int(repair_center[0] + x), int(repair_center[1] + y))
                thing.set_repair_tool(adjusted_repair, up)
                self.things.append(thing)
                self.repair_tool = thing



        if self.debug:
            for c in contours:
                cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
            cv2.imshow("habitat things", debug_img)
            cv2.setMouseCallback("habitat things", self.handle_mouse)
            cv2.waitKey(0)
            cv2.destroyAllWindows()