Python cv2 module: resizeWindow() example source code

The following 19 code examples, extracted from open-source Python projects, illustrate how to use cv2.resizeWindow().
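
As a quick orientation before the project snippets, here is a minimal self-contained sketch of the typical call pattern (the window name and image path below are placeholders, not taken from any of the projects listed). Note that cv2.resizeWindow() only takes effect on windows created with the cv2.WINDOW_NORMAL flag; windows created with the default cv2.WINDOW_AUTOSIZE flag size themselves to the displayed image and ignore the call.

import cv2

WINDOW_NAME = "preview"            # placeholder window title
image = cv2.imread("example.jpg")  # placeholder image path
if image is None:
    raise SystemExit("could not read example.jpg")

# The window must be created as WINDOW_NORMAL, otherwise resizeWindow() is a no-op.
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.resizeWindow(WINDOW_NAME, 800, 600)  # width, height in pixels

cv2.imshow(WINDOW_NAME, image)
cv2.waitKey(0)
cv2.destroyAllWindows()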

Project: CVMazeRunner    Author: M-Niedoba    | project source code | file source code
def get_start_points(image):
    window = cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900,900)
    cv2.imshow(MAZE_NAME,image)
    cv2.moveWindow(MAZE_NAME,100,100)
    print("Please press 'A' to use default start and end points, or press 'S' to choose your own")

    while(True):
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x,start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x,start_y),(end_x,end_y)))
            break
        elif key == ord ('s'):
            print("Please select a start point")
            start_x,start_y = get_user_selected_point(image)
            print ("Start Point: {0}, please select an end point".format((start_x,start_y)))
            end_x,end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x,end_y)))
            break
        else:
            print("Invalid")
            continue
    cv2.destroyAllWindows()
    return start_x,start_y,end_x,end_y
Project: CVMazeRunner    Author: M-Niedoba    | project source code | file source code
def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename,0))
    colourImage = cv2.imread(filename,1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x,start_y,end_x,end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image,granularity)
    solution = mazerunner.solveMaze(start_x,start_y,end_x,end_y)
    if(not solution):
        cv2.imshow(MAZE_NAME,image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x,end_y),3,(255,0,0),solvedImage)
        solvedImage = imageProcessor.mark_point((start_x,start_y),3,(255,0,0),solvedImage)
        window = cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900,900)
        cv2.moveWindow("Solved Image",100,100)
        cv2.imshow("Solved Image",solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: Automatic-Plate-Number-Recognition-APNR    Author: kagan94    | project source code | file source code
def plot_plate_numbers(plates_images):
    ''' Plot Plate Numbers as separate images '''
    i = 0
    for plate_img in plates_images:
        cv2.imshow('plate-%s' % i, plate_img)
        cv2.resizeWindow("plate-%s" % i, 300, 40)
        cv2.imwrite('plates/plate-%s.jpg' % i, plate_img)
        i += 1
Project: segmentation-visualization-training    Author: tkwoo    | project source code | file source code
def train_visualization_seg(self, model, epoch):
        image_name_list = sorted(glob(os.path.join(self.flag.data_path,'train/IMAGE/*/*.png')))
        print image_name_list

        image_name = image_name_list[-1]
        image_size = self.flag.image_size

        imgInput = cv2.imread(image_name, self.flag.color_mode)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_size,image_size,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print "[*] Predict Time: %.3f ms"%t_total

        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgShow = cv2.addWeighted(imgShow, 0.9, imgMaskColor, 0.4, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1)
Project: srcsim2017    Author: ZarjRobotics    | project source code | file source code
def process_image(self, cv_image, header, tag):
        """ process the image """
        objects = self.cascade.detectMultiScale(cv_image)
        for obj in objects:
            cv2.rectangle(
                cv_image, (obj[0], obj[1]),
                (obj[0] + obj[2], obj[1] + obj[3]), (0, 255, 0))

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
Project: srcsim2017    Author: ZarjRobotics    | project source code | file source code
def look_for_exit(self, image, debug=False, unwarped=True):
        """ Look for an exit from our position """
        for row in range(1, image.shape[0]/10):
            img = image.copy()
            base_row = (row * 10) + 5
            image_dump = os.environ.get("ZARJ_IMAGE_DUMP")
            markup = debug or (image_dump is not None)
            output = self.figure_path(img, base_row, markup, True)
            if output is not None and output['right'] is not None and\
               output['left'] is not None:
                real_distance = self.unwarp_perspective((image.shape[1]/2,
                                                         base_row))
                if unwarped:
                    base_row = real_distance
                if markup:
                    txt = "({0:.2f})".format(real_distance)
                    cv2.putText(img, txt, (0, img.shape[0]-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                if debug:
                    name = "_exit_decision_"
                    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
                    cv2.resizeWindow(name, 500, 250)
                    cv2.imshow(name, img)
                    cv2.waitKey(1)
                if image_dump is not None:
                    cv2.imwrite(image_dump + '/exit_{0}.png'.format(
                        debug_sequence()), img)
                if real_distance > 1.8:
                    log('Wait a second! An exit {} away is too far away'.format(
                        real_distance))
                    return None
                return base_row
        return None
Project: srcsim2017    Author: ZarjRobotics    | project source code | file source code
def process_image(self, cv_image, header, tag):
        """ process the image """
        cv2.namedWindow("watching:"+tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow("watching:"+tag, 500, 250)
        cv2.imshow("watching:"+tag, cv_image)
        cv2.waitKey(1)
Project: srcsim2017    Author: ZarjRobotics    | project source code | file source code
def process_image(self, cv_image, header, tag):
        """ process the image """
        hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

        # mask for color range
        if self.color_range:
            mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
            count = cv2.countNonZero(mask)
            if count:
                kernel = np.ones((5, 5), np.uint8)
                mask = cv2.dilate(mask, kernel, iterations=2)
                contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)

                for i, c in enumerate(contours):
                    x, y, w, h = cv2.boundingRect(c)
                    if self.prefix is not None:
                        name = '{0}{1}_{2}_{3}.png'.format(self.prefix,
                                                           tag,
                                                           header.seq, i)
                        print name
                        roi = cv_image[y:y+h, x:x+w]
                        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                        gray = cv2.equalizeHist(gray)
                        cv2.imwrite(name, gray)

                for c in contours:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
            elif self.prefix is not None:
                name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag,
                                                        header.seq, )
                cv2.imwrite(name, cv_image)

        cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(tag, 600, 600)
        cv2.imshow(tag, cv_image)
        cv2.waitKey(1)
Project: facemoji    Author: PiotrDabrowskey    | project source code | file source code
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Project: drowsy_detection    Author: thandongtb    | project source code | file source code
def showImage(img, weight, height):
    screen_res = weight, height
    scale_width = screen_res[0] / img.shape[1]
    scale_height = screen_res[1] / img.shape[0]
    scale = min(scale_width, scale_height)
    window_width = int(img.shape[1] * scale)
    window_height = int(img.shape[0] * scale)

    cv2.namedWindow('dst_rt', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('dst_rt', window_width, window_height)

    cv2.imshow('dst_rt', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: drowsy_detection    Author: thandongtb    | project source code | file source code
def showImage(img, weight, height):
    screen_res = weight, height
    scale_width = screen_res[0] / img.shape[1]
    scale_height = screen_res[1] / img.shape[0]
    scale = min(scale_width, scale_height)
    window_width = int(img.shape[1] * scale)
    window_height = int(img.shape[0] * scale)

    cv2.namedWindow('dst_rt', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('dst_rt', window_width, window_height)

    cv2.imshow('dst_rt', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: keyLogger    Author: michalmonday    | project source code | file source code
def ViewScreenCaptureStream(self):  # not fully developed; needs more work to be fully functional
        frames = []
        frameFileNames = [fN for fN in self.ftp.nlst("\\"+ self.directories[self.dirNum] +"\\vv") if fN != "s.mm"]
        if frameFileNames:
            for fileName in frameFileNames:
                retrievedData = []
                self.ftp.retrbinary('RETR ' + "\\"+ self.directories[self.dirNum] +"\\vv\\" + fileName, retrievedData.append)
                tempBuff = StringIO.StringIO()
                tempBuff.write(XorText("".join(retrievedData),self.xorMap))
                tempBuff.seek(0) #need to jump back to the beginning before handing it off to PIL
                printscreen_pil = Image.open(tempBuff)

                printscreen_pil = printscreen_pil.resize((printscreen_pil.size[0],printscreen_pil.size[1]), Image.ANTIALIAS)
                frame = np.array(printscreen_pil.getdata(),dtype=np.uint8).reshape((printscreen_pil.size[1],printscreen_pil.size[0],3))
                #frames.append(frame)

                cv2.namedWindow("window", cv2.WINDOW_NORMAL)
                cv2.imshow('window', frame)
                #cv2.resizeWindow('window', 200,200)
                if cv2.waitKey(0) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    break
        else:
            print "No frames available"
            return
        '''
        for frame in frames:
            cv2.namedWindow("window", cv2.WINDOW_NORMAL)
            cv2.imshow('window', frame)
            #cv2.resizeWindow('window', 200,200)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
        '''
Project: intel-cervical-cancer    Author: wangg12    | project source code | file source code
def on_mouse(event, x, y, flag, params):
    global start_draw
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1
    global image2

    if (event == cv.CV_EVENT_LBUTTONDOWN):
        print("LButton")
        if (not start_draw):
            roi_x0 = x
            roi_y0 = y
            start_draw = True
        else:
            roi_x1 = x
            roi_y1 = y
            start_draw = False


    elif (event == cv.CV_EVENT_MOUSEMOVE and start_draw):
        #Redraw ROI selection
        image2 = cv.CloneImage(image)
        if(len(rect_list)>0):
            for coord in rect_list:
                cv.Rectangle(image2,coord[0],coord[1],
                    cv.CV_RGB(255,0,0),5
                )
        cv.Rectangle(image2, (roi_x0, roi_y0), (x,y), 
            cv.CV_RGB(255,0,255),5)
        cv.ShowImage(window_name, image2)
    #cv2.resizeWindow(window_name, int(round(width/2)),int(round(height/2)))
Project: Camelyon17    Author: deepiano    | project source code | file source code
def make_normal_mask(path_tis_msk, path_tumor_msk, path_save_location):

    print('==> making normal mask...')

    tis_msk = cv2.imread(path_tis_msk)
    tumor_msk = cv2.imread(path_tumor_msk)


    tumor_msk_bool = (tumor_msk == 255)
    tis_msk_after = tis_msk.copy()
    tis_msk_after[tumor_msk_bool] = 0

    print('==> saving normal mask at ' + path_save_location + ' ...')
    cv2.imwrite(path_save_location, tis_msk_after)

### Display result
    """
    cv2.namedWindow('tis_msk', cv2.WINDOW_NORMAL)
    cv2.namedWindow('tis_msk_after', cv2.WINDOW_NORMAL)
    cv2.namedWindow('tumor_msk', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('tis_msk', 512, 512)
    cv2.resizeWindow('tis_msk_after', 512, 512)
    cv2.resizeWindow('tumor_msk', 512, 512)
    cv2.imshow('tis_msk', tis_msk)
    cv2.imshow('tis_msk_after', tis_msk_after)
    cv2.imshow('tumor_msk', tumor_msk)
    cv2.waitKey()
    cv2.destroyAllWindows()
    """
Project: Vision2016    Author: AluminatiFRC    | project source code | file source code
def __resizeWindow(self):
        height = 0
        if not self.paramCount:
            height = 76
        else:
            height = self.paramCount * 19
        cv2.resizeWindow(self.windowName, 500, height)
Project: kaggle_amazon_from_space    Author: N01Z3    | project source code | file source code
def im_show(name, image, resize=1):
    H, W = image.shape[0:2]
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.imshow(name, image.astype(np.uint8))
    cv2.resizeWindow(name, round(resize * W), round(resize * H))
Project: car-detection    Author: mmetcalfe    | project source code | file source code
def runClassifier(classifier_yaml, output_dir):
    traincascade_data_dir = '{}/data'.format(output_dir)
    classifier_xml = '{}/cascade.xml'.format(traincascade_data_dir)

    if not os.path.isfile(classifier_xml):
        print 'ERROR: Classifier does not exist:', classifier_xml
        sys.exit(1)

    classifier = cv2.CascadeClassifier(classifier_xml)

    for test_dir in classifier_yaml['testing']['directories']:
        test_source_name = test_dir.strip('/').split('/')[-1]

        results_dir = '{}/{}_results'.format(output_dir, test_source_name)
        detections_fname = '{}/{}_detections.dat'.format(output_dir, test_source_name)

        img_list = utils.list_images_in_directory(test_dir)
        random.shuffle(img_list)

        for img_path in img_list:
            img = cv2.imread(img_path)

            while img.shape[0] > 1024:
                print 'resize:', img_path, img.shape
                img = cv2.resize(img, dsize=None, fx=0.5, fy=0.5)

            # # Check whether the image is upside-down:
            # if checkImageOrientation(img_path):
            #     print 'Flipped!'
            #     img = cv2.flip(img, -1)

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            minSize = (img.shape[0] / 50, img.shape[1] / 50)
            cars = classifier.detectMultiScale(
                image=gray,
                # scaleFactor=1.05,
                scaleFactor=1.01,
                minNeighbors=4,
                minSize=minSize,
            )

            print img_path, len(cars)
            if len(cars) > 0:
                for (x,y,w,h) in cars:
                    # img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
                    lw = max(2, img.shape[0] / 100)
                    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),lw)
                    # roi_gray = gray[y:y+h, x:x+w]
                    # roi_color = img[y:y+h, x:x+w]

                # img = cv2.resize(img, dsize=None, fx=0.1, fy=0.1)
                # cv2.namedWindow("img", cv2.WINDOW_NORMAL)
                # cv2.resizeWindow('img', 500, 500)
                cv2.imshow('img', img)
                # cv2.imshow('img',img)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
Project: yolo_light    Author: chrisgundling    | project source code | file source code
def camera(self, file, SaveVideo):
    if file == 'camera':
        file = 0
    else:
        assert os.path.isfile(file), \
        'file {} does not exist'.format(file)

    camera = cv2.VideoCapture(file)
    self.say('Press [ESC] to quit demo')
    assert camera.isOpened(), \
    'Cannot capture source'

    elapsed = int()
    start = timer()

    cv2.namedWindow('', 0)
    _, frame = camera.read()
    height, width, _ = frame.shape
    cv2.resizeWindow('', width, height)

    if SaveVideo:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if file == 0:
            fps = 1 / self._get_fps(frame)
            if fps < 1:
                fps = 1
        else:
            fps = round(camera.get(cv2.CAP_PROP_FPS))
        videoWriter = cv2.VideoWriter('video.avi', fourcc, fps, (width, height))

    while camera.isOpened():
        _, frame = camera.read()
        if frame is None:
            print ('\nEnd of Video')
            break
        preprocessed = self.framework.preprocess(frame)
        feed_dict = {self.inp: [preprocessed]}
        net_out = self.sess.run(self.out,feed_dict)[0]
        processed = self.framework.postprocess(net_out, frame, False)
        if SaveVideo:
            videoWriter.write(processed)
        cv2.imshow('', processed)
        elapsed += 1
        if elapsed % 5 == 0:
            sys.stdout.write('\r')
            sys.stdout.write('{0:3.3f} FPS'.format(
                elapsed / (timer() - start)))
            sys.stdout.flush()
        choice = cv2.waitKey(1)
        if choice == 27: break

    sys.stdout.write('\n')
    if SaveVideo:
        videoWriter.release()
    camera.release()
    cv2.destroyAllWindows()
Project: cat-bbs    Author: aleju    | project source code | file source code
def update_window(win, inputs, outputs_gt, model):
    """Show true and generated outputs/heatmaps for example images."""
    model.eval()

    # prepare inputs and forward through network
    inputs, outputs_gt = torch.from_numpy(inputs), torch.from_numpy(outputs_gt)
    inputs, outputs_gt = Variable(inputs), Variable(outputs_gt)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)
        outputs_gt = outputs_gt.cuda(GPU)
    outputs_pred = model(inputs)

    # draw rows of resulting image
    rows = []
    for i in range(inputs.size()[0]):
        # image, ground truth outputs, predicted outputs
        img_np = (inputs[i].cpu().data.numpy() * 255).astype(np.uint8).transpose(1, 2, 0)
        hm_gt_np = outputs_gt[i].cpu().data.numpy()
        hm_pred_np = outputs_pred[i].cpu().data.numpy()

        # per image
        #   first row: ground truth outputs,
        #   second row: predicted outputs
        # each row starts with the input image, followed by heatmap images
        row_truth = [img_np] + [draw_heatmap(img_np, np.squeeze(hm_gt_np[hm_idx]), alpha=0.5) for hm_idx in range(hm_gt_np.shape[0])]
        row_pred = [img_np] + [draw_heatmap(img_np, np.squeeze(hm_pred_np[hm_idx]), alpha=0.5) for hm_idx in range(hm_pred_np.shape[0])]

        rows.append(np.hstack(row_truth))
        rows.append(np.hstack(row_pred))
    grid = np.vstack(rows)

    if SHOW_DEBUG_WINDOWS:
        # show grid in opencv window
        if cv2.getWindowProperty(win, 0) == -1:
            cv2.namedWindow(win, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(win, 1200, 600)
            time.sleep(1)
        cv2.imshow(win, grid.astype(np.uint8)[:, :, ::-1])
        cv2.waitKey(10)
    else:
        # save grid to file
        misc.imsave("window_%s.jpg" % (win,), grid.astype(np.uint8))