Python cv2 module: __version__ usage examples

The following 35 code examples, extracted from open-source Python projects, illustrate how to use cv2.__version__ (a plain string attribute, not a function).
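
Almost every snippet below gates on the major version. As a quick reference, here is a minimal sketch of that pattern (the constant name OPENCV_MAJOR is our own):

import cv2

# cv2.__version__ is a plain string such as "3.4.2"; take its major component.
OPENCV_MAJOR = int(cv2.__version__.split('.')[0])

if OPENCV_MAJOR < 3:
    fourcc = cv2.cv.CV_FOURCC(*'XVID')        # OpenCV 2.x keeps legacy names under cv2.cv
else:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # OpenCV 3+ exposes the factory directly on cv2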

Project: RFCN-tensorflow    Author: xdever
def __init__(self, path):
        self.path = path
        self.fps = 30
        self.currName="unknown"

        if os.path.isdir(self.path):
            self.type=self.DIR
            self.files = glob.glob(self.path+'/*.*')
            self.currFile = 0
        elif self.path.split('.')[-1].lower() in ['avi', 'mp4', 'mpeg', "mov"]:
            self.cap = cv2.VideoCapture(self.path)
            self.frameIndex = 0
            self.type=self.VID
            if int((cv2.__version__).split('.')[0]) < 3:
                self.fps = self.cap.get(cv2.cv.CV_CAP_PROP_FPS)
            else:
                self.fps = self.cap.get(cv2.CAP_PROP_FPS)

            if self.fps<1:
                self.fps=1
        elif self.path.split('.')[-1].lower() in ['png','bmp','jpg','jpeg']:
            self.type=self.IMG
            self.fps=0
        else:
            print("Invalid file: "+self.path)
            sys.exit(-1)
Project: dream2016_dm    Author: lishen
def create_blob_detector(roi_size=(128, 128), blob_min_area=3, 
                         blob_min_int=.5, blob_max_int=.95, blob_th_step=10):
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = blob_min_area
    params.maxArea = roi_size[0]*roi_size[1]
    params.filterByCircularity = False
    params.filterByColor = False
    params.filterByConvexity = False
    params.filterByInertia = False
    # blob detection only works with "uint8" images.
    params.minThreshold = int(blob_min_int*255)
    params.maxThreshold = int(blob_max_int*255)
    params.thresholdStep = blob_th_step
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        return cv2.SimpleBlobDetector(params)
    else:
        return cv2.SimpleBlobDetector_create(params)
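A quick, hedged usage sketch (assuming create_blob_detector and numpy as np are in scope; the synthetic image is only for illustration):

import numpy as np

detector = create_blob_detector(roi_size=(128, 128))
img = np.zeros((128, 128), dtype=np.uint8)
cv2.circle(img, (64, 64), 5, 200, -1)  # one bright blob on a dark background
keypoints = detector.detect(img)       # expect one keypoint if the blob passes the thresholds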
Project: robotics1project    Author: pchorak
def initialize(self):
        # Initialize video capture
        self.cap = cv2.VideoCapture(self.ID)

        frameRate = 20.0
        frameWidth = 640
        frameHeight = 480

        if cv2.__version__[0] == "2":
            # OpenCV 2.x: capture properties live under cv2.cv
            self.cap.set(cv2.cv.CV_CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameHeight)
        else:
            # OpenCV 3.x+: capture properties live directly on cv2
            self.cap.set(cv2.CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)

        self.thresh = 0.4
        self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8)
Project: deep_ocr    Author: JinpengLI
def do(self, bin_img):

        tmp_bin_img = np.copy(bin_img)

        if cv2.__version__[0] == "2":
            contours, hierarchy = cv2.findContours(
                tmp_bin_img,
                cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, hierarchy = cv2.findContours(
                tmp_bin_img,
                cv2.RETR_CCOMP,
                cv2.CHAIN_APPROX_SIMPLE)

        filtered_contours = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if w * h > self.max_area or w * h < self.min_area:
                bin_img[y:y+h, x:x+w] = 0
            else:
                filtered_contours.append(cnt)
        contours = filtered_contours
Project: calcam    Author: euratom-software
def set_model(self,model,field=0):

        if model == 'perspective':
            self.model[field] = model
        elif model == 'fisheye':
            if opencv_major_version < 3:
                raise ValueError('Fisheye model fitting requires OpenCV 3 or newer, you have ' + cv2.__version__)
            else:
                self.model[field] = model
        else:
            raise ValueError('Unknown model type ' + model)


    # Based on the current fit parameters, returns the fit flags
    # in the format required by OpenCV fitting functions.
    # Output: fitflags (long int)
Project: ObjectDetection    Author: PhilippParis
def mask_to_objects(mask, threshold):
    """
    applies a blob detection algorithm to the image
    Args:
        mask: image mask scaled between 0 and 255 
        threshold: min pixel intensity of interest
    Returns:
        list of objects [(x,y)]
    """

    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = threshold
    params.maxThreshold = 255

    params.filterByArea = True
    params.minArea = 150
    params.maxArea = 10000

    params.filterByCircularity = False
    params.filterByInertia = False
    params.filterByConvexity = False
    params.filterByColor = False
    params.blobColor = 255

    # Create a detector with the parameters
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else: 
        detector = cv2.SimpleBlobDetector_create(params)

    keypoints = detector.detect(mask)

    objects = []
    for k in keypoints:
        objects.append(Rect(int(k.pt[0] - k.size), int(k.pt[1] - k.size), int(k.size * 2), int(k.size * 2)))

    return objects
# ============================================================= #
Project: pyku    Author: dubvulture
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: wheter the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)
        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            if self.debug:
                self.save_hough(lines, clmap)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: pyku    Author: dubvulture
def __init__(self,
                 saved_model=None,
                 train_folder=None,
                 feature=_feature.__func__):
        """
        :param saved_model: optional saved train set and labels as .npz
        :param train_folder: optional custom train data to process
        :param feature: feature function - compatible with saved_model
        """
        self.feature = feature
        if train_folder is not None:
            self.train_set, self.train_labels, self.model = \
                self.create_model(train_folder)
        else:
            if cv2.__version__[0] == '2':
                self.model = cv2.KNearest()
            else:
                self.model = cv2.ml.KNearest_create()
            if saved_model is None:
                saved_model = TRAIN_DATA+'raw_pixel_data.npz'
            with np.load(saved_model) as data:
                self.train_set = data['train_set']
                self.train_labels = data['train_labels']
                if cv2.__version__[0] == '2':
                    self.model.train(self.train_set, self.train_labels)
                else:
                    self.model.train(self.train_set, cv2.ml.ROW_SAMPLE,
                                     self.train_labels)
Project: pyku    Author: dubvulture
def create_model(self, train_folder):
        """
        Return the training set, its labels and the trained model
        :param train_folder: folder where to retrieve data
        :return: (train_set, train_labels, trained_model)
        """
        digits = []
        labels = []
        for n in range(1, 10):
            folder = train_folder + str(n)
            samples = [pic for pic in os.listdir(folder)
                       if os.path.isfile(os.path.join(folder, pic))]

            for sample in samples:
                image = cv2.imread(os.path.join(folder, sample))
                # Expecting black on white
                image = 255 - cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                _, image = cv2.threshold(image, 0, 255,
                                         cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                feat = self.feature(image)
                digits.append(feat)
                labels.append(n)

        digits = np.array(digits, np.float32)
        labels = np.array(labels, np.float32)
        if cv2.__version__[0] == '2':
            model = cv2.KNearest()
            model.train(digits, labels)
        else:
            model = cv2.ml.KNearest_create()
            model.train(digits, cv2.ml.ROW_SAMPLE, labels)
        return digits, labels, model
Project: pyku    Author: dubvulture
def classify(self, image):
        """
        Given a 28x28 image, returns an array representing the two most
        probable predictions
        :param image:
        :return: array of 2 highest prob-digit tuples
        """
        if cv2.__version__[0] == '2':
            res = self.model.find_nearest(np.array([self.feature(image)]), k=11)
        else:
            res = self.model.findNearest(np.array([self.feature(image)]), k=11)
        hist = np.histogram(res[2], bins=9, range=(1, 10), normed=True)[0]
        zipped = sorted(zip(hist, np.arange(1, 10)), reverse=True)
        return np.array(zipped[:2])
Project: pyku    Author: dubvulture
def is_grid(self, grid, image):
        """
        Checks the "gridness" by analyzing the results of a hough transform.
        :param grid: binary image
        :return: wheter the object in the image might be a grid or not
        """
        #   - Distance resolution = 1 pixel
        #   - Angle resolution = 1° for high line density
        #   - Threshold = 144 hough intersections
        #        8px digit + 3*2px white + 2*1px border = 16px per cell
        #           => 144x144 grid
        #        144 - minimum number of points on the same line
        #       (but due to imperfections in the binarized image it's highly
        #        improbable to detect a 144x144 grid)

        lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

        if lines is not None and np.size(lines) >= 20:
            lines = lines.reshape((lines.size // 2), 2)
            # theta in [0, pi] (theta > pi => rho < 0)
            # normalise theta in [-pi, pi] and negatives rho
            lines[lines[:, 0] < 0, 1] -= np.pi
            lines[lines[:, 0] < 0, 0] *= -1

            criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
            # split lines into 2 groups to check whether they're perpendicular
            if cv2.__version__[0] == '2':
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)
            else:
                density, clmap, centers = cv2.kmeans(
                    lines[:, 1], 2, None, criteria,
                    5, cv2.KMEANS_RANDOM_CENTERS)

            # Overall variance from respective centers
            var = density / np.size(clmap)
            sin = abs(np.sin(centers[0] - centers[1]))
            # It is probably a grid only if:
            #   - centroids difference is almost a 90° angle (+-15° limit)
            #   - variance is less than 5° (keeping in mind surface distortions)
            return sin > 0.99 and var <= (5*np.pi / 180) ** 2
        else:
            return False
Project: 1m-agents    Author: geek-ai
def make_video(self, images, outvid=None, fps=5, size=None, is_color=True, format="XVID"):
        """
        Create a video from a list of images.
        @param      outvid      output video
        @param      images      list of images to use in the video
        @param      fps         frames per second
        @param      size        size of each frame
        @param      is_color    color
        @param      format      see http://www.fourcc.org/codecs.php
        """
        # fourcc = VideoWriter_fourcc(*format)
        # For opencv2 and opencv3:
        if int(cv2.__version__[0]) > 2:
            fourcc = cv2.VideoWriter_fourcc(*format)
        else:
            fourcc = cv2.cv.CV_FOURCC(*format)
        vid = None
        for image in images:
            assert os.path.exists(image)
            img = cv2.imread(image)
            if vid is None:
                if size is None:
                    size = img.shape[1], img.shape[0]
                vid = cv2.VideoWriter(outvid, fourcc, float(fps), size, is_color)
            if size[0] != img.shape[1] or size[1] != img.shape[0]:
                img = cv2.resize(img, size)
            vid.write(img)
        vid.release()
Project: 1m-agents    Author: geek-ai
def make_video(images, outvid=None, fps=5, size=None, is_color=True, format="XVID"):
    """
    Create a video from a list of images.
    @param      outvid      output video
    @param      images      list of images to use in the video
    @param      fps         frames per second
    @param      size        size of each frame
    @param      is_color    color
    @param      format      see http://www.fourcc.org/codecs.php
    """
    # fourcc = VideoWriter_fourcc(*format)
    # For opencv2 and opencv3:
    if int(cv2.__version__[0]) > 2:
        fourcc = cv2.VideoWriter_fourcc(*format)
    else:
        fourcc = cv2.cv.CV_FOURCC(*format)
    vid = None
    for image in images:
        assert os.path.exists(image)
        img = cv2.imread(image)
        if vid is None:
            if size is None:
                size = img.shape[1], img.shape[0]
            vid = cv2.VideoWriter(outvid, fourcc, float(fps), size, is_color)
        if size[0] != img.shape[1] or size[1] != img.shape[0]:
            img = cv2.resize(img, size)
        vid.write(img)
    vid.release()
Project: camera_calibration_frontend    Author: groundmelon
def cal_fromcorners(self, good):
        # Perform monocular calibrations
        lcorners = [(l, b) for (l, r, b) in good]
        rcorners = [(r, b) for (l, r, b) in good]
        self.l.cal_fromcorners(lcorners)
        self.r.cal_fromcorners(rcorners)

        lipts = [ l for (l, _, _) in good ]
        ripts = [ r for (_, r, _) in good ]
        boards = [ b for (_, _, b) in good ]

        opts = self.mk_object_points(boards, True)

        flags = cv2.CALIB_FIX_INTRINSIC

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        if LooseVersion(cv2.__version__).version[0] == 2:
            cv2.stereoCalibrate(opts, lipts, ripts, self.size,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)
        else:
            cv2.stereoCalibrate(opts, lipts, ripts,
                               self.l.intrinsics, self.l.distortion,
                               self.r.intrinsics, self.r.distortion,
                               self.size,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)

        self.set_alpha(0.0)
Project: Splipy    Author: sintefmath
def image_height(filename, N=[30,30], p=[4,4]):
    """Generate a B-spline surface approximation given by the heightmap in a
    grayscale image.

    :param str filename: Name of image file to read
    :param (int,int) N: Number of control points in each parametric direction
    :param (int,int) p: Polynomial order (degree+1) in each parametric direction
    :return: Normalized (all values between 0 and 1) heightmap approximation
    :rtype: :class:`splipy.Surface`
    """

    import cv2

    im = cv2.imread(filename)

    width  = len(im)
    height = len(im[0])

    # initialize image holder
    imGrey = np.zeros((len(im),   len(im[0])),   np.uint8)

    # convert to greyscale image
    if cv2.__version__[0] == '2':
        cv2.cvtColor(im, cv2.cv.CV_RGB2GRAY, imGrey)
    else:
        cv2.cvtColor(im, cv2.COLOR_RGB2GRAY, imGrey)

    pts = []
    # guess uniform evaluation points and knot vectors
    u = range(width)
    v = range(height)
    knot1 = [0]*3 + list(range(N[0]-p[0]+2)) + [N[0]-p[0]+1]*3
    knot2 = [0]*3 + list(range(N[1]-p[1]+2)) + [N[1]-p[1]+1]*3

    # normalize all values to be in range [0, 1]
    u     = [float(i)/u[-1]     for i in u]
    v     = [float(i)/v[-1]     for i in v]
    knot1 = [float(i)/knot1[-1] for i in knot1]
    knot2 = [float(i)/knot2[-1] for i in knot2]

    for j in range(height):
        for i in range(width):
            pts.append([v[j], u[i], float(imGrey[width-i-1][j])/255.0*1.0])

    basis1 = BSplineBasis(4, knot1)
    basis2 = BSplineBasis(4, knot2)

    return surface_factory.least_square_fit(pts,[basis1, basis2], [u,v])
Project: facemoji    Author: PiotrDabrowskey
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
Project: dream2016_dm    Author: lishen
def sample_hard_negatives(img, roi_mask, out_dir, img_id, abn,  
                          patch_size=256, neg_cutoff=.35, nb_bkg=100, 
                          start_sample_nb=0,
                          bkg_dir='background', verbose=False):
    '''WARNING: the definition of hns may be problematic.
    There has been study showing that the context of an ROI is also useful
    for classification.
    '''
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    img = add_img_margins(img, patch_size/2)
    roi_mask = add_img_margins(roi_mask, patch_size/2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours,_ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _,contours,_ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [ cv2.contourArea(cont) for cont in contours ]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx,ry,rw,rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        print "ROI centroid=", (cx,cy); sys.stdout.flush()

    rng = np.random.RandomState(12345)
    # Sample hard negative samples.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        x1,x2 = (rx - patch_size/2, rx + rw + patch_size/2)
        y1,y2 = (ry - patch_size/2, ry + rh + patch_size/2)
        x1 = crop_val(x1, patch_size/2, img.shape[1] - patch_size/2)
        x2 = crop_val(x2, patch_size/2, img.shape[1] - patch_size/2)
        y1 = crop_val(y1, patch_size/2, img.shape[0] - patch_size/2)
        y2 = crop_val(y2, patch_size/2, img.shape[0] - patch_size/2)
        x = rng.randint(x1, x2)
        y = rng.randint(y1, y2)
        if not overlap_patch_roi((x,y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size/2:y + patch_size/2, 
                        x - patch_size/2:x + patch_size/2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), 
                                mode='I')
            filename = basename + "_%04d" % (sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            sampled_bkg += 1
            if verbose:
                print "sampled a hns patch at (x,y) center=", (x,y)
                sys.stdout.flush()
Project: dream2016_dm    Author: lishen
def sample_blob_negatives(img, roi_mask, out_dir, img_id, abn, blob_detector, 
                          patch_size=256, neg_cutoff=.35, nb_bkg=100, 
                          start_sample_nb=0,
                          bkg_dir='background', verbose=False):
    bkg_out = os.path.join(out_dir, bkg_dir)
    basename = '_'.join([img_id, str(abn)])

    img = add_img_margins(img, patch_size/2)
    roi_mask = add_img_margins(roi_mask, patch_size/2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype('uint8')
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        contours,_ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _,contours,_ = cv2.findContours(
            roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [ cv2.contourArea(cont) for cont in contours ]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx,ry,rw,rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        print "ROI centroid=", (cx,cy); sys.stdout.flush()

    # Sample blob negative samples.
    key_pts = blob_detector.detect((img/img.max()*255).astype('uint8'))
    rng = np.random.RandomState(12345)
    key_pts = rng.permutation(key_pts)
    sampled_bkg = 0
    for kp in key_pts:
        if sampled_bkg >= nb_bkg:
            break
        x,y = int(kp.pt[0]), int(kp.pt[1])
        if not overlap_patch_roi((x,y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = img[y - patch_size/2:y + patch_size/2, 
                        x - patch_size/2:x + patch_size/2]
            patch = patch.astype('int32')
            patch_img = toimage(patch, high=patch.max(), low=patch.min(), 
                                mode='I')
            filename = basename + "_%04d" % (start_sample_nb + sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_img.save(fullname)
            if verbose:
                print "sampled a blob patch at (x,y) center=", (x,y)
                sys.stdout.flush()
            sampled_bkg += 1
    return sampled_bkg

#### End of function definition ####
Project: dream2016_dm    Author: lishen
def segment_breast(cls, img, low_int_threshold=.05, crop=True):
        '''Perform breast segmentation
        Args:
            low_int_threshold([float or int]): Low intensity threshold to 
                    filter out background. It can be a fraction of the max 
                    intensity value or an integer intensity value.
            crop ([bool]): Whether or not to crop the image.
        Returns:
            An image of the segmented breast.
        NOTES: the low_int_threshold is applied to an image of dtype 'uint8',
            which has a max value of 255.
        '''
        # Create img for thresholding and contours.
        img_8u = (img.astype('float32')/img.max()*255).astype('uint8')
        if low_int_threshold < 1.:
            low_th = int(img_8u.max()*low_int_threshold)
        else:
            low_th = int(low_int_threshold)
        _, img_bin = cv2.threshold(
            img_8u, low_th, maxval=255, type=cv2.THRESH_BINARY)
        ver = (cv2.__version__).split('.')
        if int(ver[0]) < 3:
            contours,_ = cv2.findContours(
                img_bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _,contours,_ = cv2.findContours(
                img_bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cont_areas = [ cv2.contourArea(cont) for cont in contours ]
        idx = np.argmax(cont_areas)  # find the largest contour, i.e. breast.
        breast_mask = cv2.drawContours(
            np.zeros_like(img_bin), contours, idx, 255, -1)  # fill the contour.
        # segment the breast.
        img_breast_only = cv2.bitwise_and(img, img, mask=breast_mask)
        x,y,w,h = cv2.boundingRect(contours[idx])
        if crop:
            img_breast_only = img_breast_only[y:y+h, x:x+w]
        return img_breast_only, (x,y,w,h)
Project: spockpy    Author: achillesrasquinha
def _get_opencv_version():
    version = cv2.__version__
    version = version.split('.')

    major, minor, patch = int(version[0]), int(version[1]), int(version[2])

    return (major, minor, patch)
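Note that dev or rc builds report strings such as '4.5.1-dev', which makes int() on the patch component raise ValueError. A defensive variant (our own sketch, not part of the project):

import re
import cv2

def get_opencv_version_safe():
    # Keep only the leading digits of each component, e.g. '1-dev' -> '1'.
    parts = cv2.__version__.split('.')[:3]
    return tuple(int(re.match(r'\d+', part).group()) for part in parts)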
Project: sql_python_deep_learning    Author: Azure
def print_library_version():
    print(os.getcwd())
    version_pandas = pkg_resources.get_distribution("pandas").version
    print("Version pandas: {}".format(version_pandas))
    print("Version OpenCV: {}".format(cv2.__version__))
    version_cntk = pkg_resources.get_distribution("cntk").version
    print("Version CNTK: {}".format(version_cntk))
    cntk.logging.set_trace_level(2)
    print("Devices used by CNTK: {}".format(cntk.all_devices()))



######################################################################
# for feature generation
Project: pynephoscope    Author: neXyon
def readNight(self):
        sum_image = None
        count = 0

        while True:
            if self.capture.grab():
                if cv2.__version__[0] == '3':
                    _, image = self.capture.retrieve(flag = self.channel)
                else:
                    _, image = self.capture.retrieve(channel = self.channel)

                image1 = np.int32(image)

                if self.last_image is not None:
                    difference = image1 - self.last_image
                else:
                    difference = np.array([0])

                difference = float(np.sum(np.abs(difference))) / float(difference.size)

                if sum_image is None:
                    sum_image = self.last_image
                else:
                    sum_image += self.last_image

                count += 1

                self.last_image = image1

                if self.differenceChecker.check(difference):
                    SkyCamera.log('Difference: %f %d 1' % (difference, count))

                    time = Time.now()

                    self.checkDaytime()

                    return np.uint8(sum_image / count), time
                else:
                    SkyCamera.log('Difference: %f %d 0' % (difference, count))
            else:
                return None, None
Project: pynephoscope    Author: neXyon
def read(self):
        sum_image = None
        image = None

        count = Configuration.day_averaging_frames

        if self.night:
            count = Configuration.night_averaging_frames

        for i in range(count):
            if self.capture.grab():
                if cv2.__version__[0] == '3':
                    _, image = self.capture.retrieve(flag = self.channel)
                else:
                    _, image = self.capture.retrieve(channel = self.channel)

                if sum_image is None:
                    sum_image = np.int32(image)
                else:
                    sum_image += np.int32(image)

        time = Time.now()

        self.checkDaytime()

        return np.uint8(sum_image / count), time
Project: catFaceSwapSendToTodd    Author: Micasou
def main():
    # Make sure OpenCV is version 3.0 or above
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    if int(major_ver) < 3 :
        print >>sys.stderr, 'ERROR: Script needs OpenCV 3.0 or higher'
        sys.exit(1)
    catFileName = "testcat5.jpg"
    catFileNameOutter = "Cat0.jpg"
    catFileName2 = "testcat2.jpg"
    filename1 = 'testcat.jpg'
    filename2 = 'donald_trump.jpg'
    filename3 = 'ted_cruz.jpg'
    todd = 'todd.jpg'

    catbounds = detectCatFace(catFileName)
    othwerCatBounds = detectCatFace(catFileNameOutter)
    todbounds = detectHumanFaceRect(todd)
    humanbounds = detectHumanFaceRect(filename3)
    swap(catFileName,filename3,catbounds,humanbounds)
    swap(todd,catFileName,todbounds,catbounds)
    #swap(todd,catFileName,todbounds,catbounds)
    #swap(catFileName,todd,catbounds,todbounds)
    print catbounds
    print humanbounds
    #swap(filename3,filename1,filename2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Project: deep_ocr    Author: JinpengLI
def remove_noise_by_contours(self, bin_img):
        c_bin_img = np.copy(bin_img)
        min_area = 100
        max_area = bin_img.shape[0] * bin_img.shape[1]
        min_w = 10
        min_h = 10
        if cv2.__version__[0] == "2":
            contours, hierarchy = cv2.findContours(
                c_bin_img,
                cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, hierarchy = cv2.findContours(
                c_bin_img,
                cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE)

        filtered_contours = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if w * h >= min_area and (h >= min_h \
                    or w >= min_w) and w * h <= max_area:
                filtered_contours.append(cnt)
            else:
                bin_img[y:y+h, x:x+w] = 0
        contours = filtered_contours
        return bin_img
Project: PKM2    Author: Szonek
def checkOpennCVVersion():
    cv2Ver = cv2.__version__
    if cv2Ver != '2.4.13':
        sys.exit()
    else:
        print cv2.__version__  ## be sure that it's OpenCV 2.4.13
Project: Notes2ppt    Author: gsengupta2810
def is_cv2():
    # if we are using OpenCV 2, then our cv2.__version__ will start
    # with '2.'
    return check_opencv_version("2.")
Project: Notes2ppt    Author: gsengupta2810
def is_cv3():
    # if we are using OpenCV 3.X, then our cv2.__version__ will start
    # with '3.'
    return check_opencv_version("3.")
Project: Notes2ppt    Author: gsengupta2810
def check_opencv_version(major, lib=None):
    # if the supplied library is None, import OpenCV
    if lib is None:
        import cv2 as lib

    # return whether or not the current OpenCV version matches the
    # major version number
    return lib.__version__.startswith(major)
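A brief usage sketch for these helpers (assuming cv2 is imported and params is a cv2.SimpleBlobDetector_Params() instance):

if is_cv2():
    detector = cv2.SimpleBlobDetector(params)         # OpenCV 2.x factory name
else:
    detector = cv2.SimpleBlobDetector_create(params)  # OpenCV 3+ (including 4) uses the _create factory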
Project: calcam    Author: euratom-software
def normalise(self,x,y,field):

        if np.shape(x) != np.shape(y):
            raise ValueError("X and Y input arrays must be the same size!")

        if self.fit_params[field].model == 'fisheye' and opencv_major_version < 3:
            raise Exception('Fisheye model distortion calculation requires OpenCV 3 or newer! Your version is ' + cv2.__version__)

        # Flatten everything and create output array        
        oldshape = np.shape(x)
        x = np.reshape(x,np.size(x),order='F')
        y = np.reshape(y,np.size(y),order='F')

        input_points = np.zeros([x.size,1,2])
        for point in range(len(x)):
            input_points[point,0,0] = x[point]
            input_points[point,0,1] = y[point]

        if self.fit_params[field].model == 'perspective':
            undistorted = cv2.undistortPoints(input_points,self.fit_params[field].cam_matrix,self.fit_params[field].kc)
        elif self.fit_params[field].model == 'fisheye':
            undistorted = cv2.fisheye.undistortPoints(input_points,self.fit_params[field].cam_matrix,self.fit_params[field].kc)

        undistorted = np.swapaxes(undistorted,0,1)[0]

        return np.reshape(undistorted[:,0],oldshape,order='F') , np.reshape(undistorted[:,1],oldshape,order='F')


    # Get the sight-line direction(s) for given pixel coordinates, as unit vector(s) in the lab frame.
    # Input: x_pixel and y_pixel - array-like, x and y pixel coordinates (floats or arrays/lists of floats)
    # Optional inputs: ForceField - for split field cameras, get the sight line direction as if the pixel
    #                               was part of the specified subfield, even if it isn't really (int)
    #                  Coords - whether the input x_pixel and y_pixel values are in display or original 
    #                           coordinates (default Display; string either 'Display' or 'Original')
    # Output: Numpy array with 1 more dimension than the input x_pixels and y_pixels, but otherwise
    #         the same size and shape. The extra dimension indexes the 3 vector components, 0 = X, 1 = Y, 2 = Z.
    #         This is a unit vector in the CAD model coordinate system.
Project: calcam    Author: euratom-software
def load(self,SaveName):

        # File to load from
        SaveFile = open(os.path.join(paths.virtualcameras,SaveName + '.pickle'),'rb')

        try:
            save = pickle.load(SaveFile)
        except:
            SaveFile.seek(0)
            save = pickle.load(SaveFile,encoding='latin1')

        self.nfields = save['nfields']
        self.fieldmask = save['field_mask']
        self.image_display_shape = save['image_display_shape']


        if 'field_names' in save:
            self.field_names = save['field_names']
        else:
            if self.nfields == 1:
                self.field_names = ['Image']
            else:
                self.field_names = []
                for field in range(self.nfields):
                    self.field_names.append('Sub FOV # {:d}'.format(field+1))
        self.fit_params = []
        for field in range(self.nfields):
                if 'model' in save:
                    if save['model'][field] == 'fisheye' and opencv_major_version < 3:
                        raise Exception('This calibration result uses the fisheye camera model and requires OpenCV3 to be loaded (you are using {:s}).'.format(cv2.__version__))
                    self.fit_params.append(FieldFit(save['model'][field],save['fitparams'][field],from_save=True))
                else:
                    self.fit_params.append(FieldFit('perspective',save['fitparams'][field],from_save=True))

        self.transform = CoordTransformer()
        self.transform.x_pixels = save['transform_pixels'][0]
        self.transform.y_pixels = save['transform_pixels'][1]

        # Due to a bug in early versions of the view designer, some virtual calibrations' transformers
        # were left with x_pixels and y_pixels set to None. This is to fix things when loading
        # virtual calibration objects made with the buggy version (and has no effect for newer versions).
        if self.transform.x_pixels is None or self.transform.y_pixels is None:
            self.transform.x_pixels = self.image_display_shape[0]
            self.transform.y_pixels = self.image_display_shape[1]

        self.transform.pixel_aspectratio = save['transform_pixel_aspect']
        self.transform.set_transform_actions(save['transform_actions'])
        SaveFile.close()


# Small class for storing the actual fit output parameters for a sub-field.
# Instances of this are used inside the CalibResults class.
Project: calcam    Author: euratom-software
def __init__(self):
        if cv2_version < 2.4 or (cv2_version == 2.4 and cv2_micro_version < 6):
            raise Exception('Histogram equalisation requires OpenCV 2.4.6 or newer; you have {:s}'.format(cv2.__version__))
Project: calcam    Author: euratom-software
def __init__(self, parent,modelselection=False):

        # GUI initialisation
        qt.QDialog.__init__(self, parent)
        qt.uic.loadUi(os.path.join(paths.ui,'chessboard_image_dialog.ui'), self)

        self.parent = parent
        try:
            self.image_transformer = self.parent.image.transform
        except:
            self.image_transformer = CoordTransformer()

        if not modelselection:
            self.model_options.hide()


        # Callbacks for GUI elements
        self.load_images_button.clicked.connect(self.load_images)
        self.detect_chessboard_button.clicked.connect(self.detect_corners)
        self.apply_button.clicked.connect(self.apply)
        self.cancel_button.clicked.connect(self.reject)
        self.next_im_button.clicked.connect(self.change_image)
        self.prev_im_button.clicked.connect(self.change_image)
        self.current_image = None

        if int(fitting.cv2.__version__[0]) < 3:
            self.fisheye_model.setEnabled(False)
            self.fisheye_model.setToolTip('Requires OpenCV 3')

        # Sort out pyplot setup for showing the images
        im_figure = plt.figure()
        self.mplwidget = FigureCanvas(im_figure)
        self.mplwidget.hide()
        self.im_control_bar.hide()
        self.imax = im_figure.add_subplot(111)
        plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
        self.imax.get_xaxis().set_visible(False)
        self.imax.get_yaxis().set_visible(False)
        self.image_frame.layout().addWidget(self.mplwidget,1)

        self.detection_run = False

        self.images = []
        self.filenames = []

        self.pointpairs_result = None

        # Start the GUI!
        self.show()
Project: video-classification    Author: canhnd58
def normalize(path, **kwargs):
    reso = kwargs.pop('reso', (WIDTH, HEIGHT))
    seconds = kwargs.pop('sec', SECONDS)
    fps = kwargs.pop('fps', FPS)
    remove = kwargs.pop('remove', True)
    log = kwargs.pop('log', True)

    if platform == "linux" or platform == "linux2":
        codec = LINUX_CODEC
    elif platform == "darwin":
        codec = MAC_CODEC
    else:
        raise Exception("Unsupported Platform")

    codec = kwargs.pop('codec', codec)

    if not os.path.isfile(path):
        raise Exception('%s not found!' % (path, ))

    audio_path = path[0:-4] + '.tmp.wav'
    command = "ffmpeg -i %s %s -y" % (path, audio_path)
    download_log = open("download.log", 'w') if log else None
    subprocess.call(command, shell=True, stdout=download_log, stderr=subprocess.STDOUT)

    cap = cv2.VideoCapture(path)
    cv2_version = cv2.__version__[0]

    if cv2_version == '3':
        fourcc = cv2.VideoWriter_fourcc(*codec)
    elif cv2_version == '2':
        fourcc = cv2.cv.CV_FOURCC(*codec)
    else:
        raise Exception('Unsupported OpenCV version!')

    out_path = path[0:-4] + '.tmp.avi'
    out = cv2.VideoWriter(out_path, fourcc, fps, reso, True)

    frame_count = fps * seconds
    while(cap.isOpened()):
        if frame_count <= 0: break
        frame_count -= 1
        ret, frame = cap.read()

        if not ret: continue
        resized = cv2.resize(frame, reso)
        out.write(resized)

    cap.release()
    out.release()
    if remove: os.unlink(path)

    return (audio_path, out_path)
Project: rekognition-video-utils    Author: awslabs
def get_frame_rate(video):
    # Find OpenCV version
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    if int(major_ver)  < 3 :
        fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
    else:
        fps = video.get(cv2.CAP_PROP_FPS)
    print "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)

    return fps
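
A hedged usage sketch (the file name example.mp4 is hypothetical):

video = cv2.VideoCapture("example.mp4")  # hypothetical input path
fps = get_frame_rate(video)
video.release()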