Python cv2 module: error() code examples

The following 16 code examples, extracted from open-source Python projects, illustrate how cv2.error is used in practice, typically to catch failures raised by OpenCV calls in an except block.
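
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below). cv2.error is the exception class that OpenCV's Python bindings raise when a native call fails; it subclasses Exception, so it is caught with an ordinary try/except. The deliberately invalid resize mirrors the "(-215) dsize.area() > 0" assertion quoted in one of the snippets further down.

import numpy as np
import cv2

img = np.zeros((4, 4), dtype=np.uint8)
try:
    # dsize=(0, 0) with no fx/fy scale factors violates an OpenCV assertion
    cv2.resize(img, (0, 0))
except cv2.error as e:
    # str(e) carries the native error text (assertion, function, source location)
    print("OpenCV raised:", e)

Every example below follows the same pattern: the OpenCV call that can fail is wrapped in try/except cv2.error, and each project recovers in its own way (returning a sentinel value, logging, skipping a corrupted image, and so on).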

Project: vision    Author: SouthEugeneRoboticsTeam    | Project source | File source
def get_blob(im, lower, upper):
    # Finds a blob, if one exists

    # Create mask of green
    try:
        green_mask = cv2.inRange(im, lower, upper)
    except cv2.error:
        # Catches the case where there is no blob in range
        return None, None

    # Get largest blob
    largest = get_largest(green_mask, 1)
    second_largest = get_largest(green_mask, 2)

    if largest is not None and second_largest is not None:
        return [largest, second_largest], green_mask
    else:
        return None, None
Project: kaggle-dstl    Author: lopuhin    | Project source | File source
def _aligned(im_ref, im, im_to_align=None, key=None):
    w, h = im.shape[:2]
    im_ref = cv2.resize(im_ref, (h, w), interpolation=cv2.INTER_CUBIC)
    im_ref = _preprocess_for_alignment(im_ref)
    if im_to_align is None:
        im_to_align = im
    im_to_align = _preprocess_for_alignment(im_to_align)
    assert im_ref.shape[:2] == im_to_align.shape[:2]
    try:
        cc, warp_matrix = _get_alignment(im_ref, im_to_align, key)
    except cv2.error as e:
        logger.info('Error getting alignment: {}'.format(e))
        return im, False
    else:
        im = cv2.warpAffine(im, warp_matrix, (h, w),
                            flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
        im[im == 0] = np.mean(im)
        return im, True
Project: ihradis-cnn-deblur    Author: sandeepraju    | Project source | File source
def run(self):
        for name in self.fileList:
            try:
                name = self.imageDir + name
                imOrg = cv2.imread(name)
                if imOrg is None:
                    print(name, " ERROR - could not read image.", file=sys.stderr)
                    self.imageQueue.put(False)
                else:
                    if self.scaleFactor != 1.0:
                        imOrg = cv2.resize(imOrg, dsize=(0,0), fx=self.scaleFactor, fy=self.scaleFactor, interpolation=cv2.INTER_AREA)
                    self.imageQueue.put(imOrg)
            except cv2.error as e:
                print(name, " ERROR - cv2.error", str(e), file=sys.stderr)
                self.imageQueue.put(False)
            except:
                print(name, " ERROR - UNKNOWN:", sys.exc_info()[0], file=sys.stderr)                
                self.imageQueue.put(False)

        self.imageQueue.put(None)
Project: Yugioh-bot    Author: will7200    | Project source | File source
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)

        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster = self.compare_distances(train_img, cluster)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, good_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True
Project: cvcalib    Author: Algomorph    | Project source | File source
def __seek_calib_limit(self, video, frame_range, max_miss_count=3, verbose=True):
        frame_range_signed_length = frame_range[1] - frame_range[0]
        sample_interval_frames = frame_range_signed_length // 2
        failed_attempts = 0
        while sample_interval_frames != 0:
            miss_count = 0
            try:
                if verbose:
                    if sample_interval_frames < 0:
                        print("\nSampling every {:d} frames within {:s}, backwards."
                              .format(-sample_interval_frames, str((frame_range[1], frame_range[0]))))
                    else:
                        print("\nSampling every {:d} frames within {:s}.".format(sample_interval_frames,
                                                                                 str(frame_range)))
                for i_frame in range(frame_range[0], frame_range[1], sample_interval_frames):
                    video.read_at_pos(i_frame)
                    if verbose:
                        print('.', end="", flush=True)
                    if video.try_approximate_corners(self.board_dims):
                        frame_range[0] = i_frame
                        miss_count = 0
                    else:
                        miss_count += 1
                        if miss_count > max_miss_count:
                            # too many frames w/o calibration board, highly unlikely those are all bad frames,
                            # go to finer scan
                            frame_range[1] = i_frame
                            break
                sample_interval_frames = round(sample_interval_frames / 2)
            except cv2.error as e:
                failed_attempts += 1
                if failed_attempts > 2:
                    raise RuntimeError("Too many failed attempts. Frame index: " + str(i_frame))
                print("FFmpeg hickup, attempting to reopen video.")
                video.reopen()  # workaround for ffmpeg AVC/H.264 bug
        return frame_range[0]
Project: cvcalib    Author: Algomorph    | Project source | File source
def find_camera_poses(self, verbose=False):
        ix_cam = 0
        for video in self.videos:
            camera = self.cameras[ix_cam]
            if verbose:
                print("Finding camera poses for video {:s} ... (this may take awhile)".format(video.name))
            video.poses = []
            rotations, translations = calibrate_intrinsics(camera, video.image_points,
                                                           self.board_object_corner_set,
                                                           self.args.use_rational_model,
                                                           self.args.use_tangential_coeffs,
                                                           self.args.use_thin_prism,
                                                           fix_radial=True,
                                                           fix_thin_prism=True,
                                                           max_iterations=1,
                                                           use_existing_guess=True,
                                                           test=True)
            if verbose:
                print("Camera pose reprojection error for video {:s}: {:.4f}"
                      .format(video.name, camera.intrinsics.error))
            for ix_pose in range(len(rotations)):
                translation = translations[ix_pose]
                rotation = rotations[ix_pose]
                pose = Pose(rotation=rotation, translation_vector=translation)
                video.poses.append(pose)

            ix_cam += 1
Project: imgProcessor    Author: radjkarl    | Project source | File source
def _toSize(img):
    fac = MAX_SIZE / max(img.shape)
    if fac < 1:
        try:
            return cv2.resize(img, (0, 0), fx=fac, fy=fac,
                              interpolation=cv2.INTER_AREA)
        except cv2.error:
            # cv2.error: ..\..\..\modules\imgproc\src\imgwarp.cpp:3235: error:
            # (-215) dsize.area() > 0 in function cv::resize
            return cv2.resize(img.T, (0, 0), fx=fac, fy=fac,
                              interpolation=cv2.INTER_AREA).T
    return img
Project: imgProcessor    Author: radjkarl    | Project source | File source
def signalMinimum(img, fitParams=None, n_std=3):
    '''
    intersection between signal and background peak
    '''
    if fitParams is None:
        fitParams = FitHistogramPeaks(img).fitParams
    assert len(fitParams) > 1, 'need 2 peaks to get minimum signal'

    i = signalPeakIndex(fitParams)
    signal = fitParams[i]
    bg = getBackgroundPeak(fitParams)
    smn = signal[1] - n_std * signal[2]
    bmx = bg[1] + n_std * bg[2]
    if smn > bmx:
        return smn
    # peaks are overlapping
    # define signal min. as intersection between both Gaussians

    def solve(p1, p2):
        s1, m1, std1 = p1
        s2, m2, std2 = p2
        a = (1 / (2 * std1**2)) - (1 / (2 * std2**2))
        b = (m2 / (std2**2)) - (m1 / (std1**2))
        c = (m1**2 / (2 * std1**2)) - (m2**2 / (2 * std2**2)) - \
            np.log(((std2 * s1) / (std1 * s2)))
        return np.roots([a, b, c])
    i = solve(bg, signal)
    try:
        return i[np.logical_and(i > bg[1], i < signal[1])][0]
    except IndexError:
        # this error shouldn't occur... well
        return max(smn, bmx)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def scaleParamsFromReference(img, reference):
    # saving startup time:
    from scipy.optimize import curve_fit

    def ff(arr):
        arr = imread(arr, 'gray')
        if arr.size > 300000:
            arr = arr[::10, ::10]
        m = np.nanmean(arr)
        s = np.nanstd(arr)
        r = m - 3 * s, m + 3 * s
        b = (r[1] - r[0]) / 5
        return arr, r, b

    img, imgr, imgb = ff(img)
    reference, refr, refb = ff(reference)

    nbins = np.clip(15, max(imgb, refb), 50)

    refh = np.histogram(reference, bins=nbins, range=refr)[
        0].astype(np.float32)
    imgh = np.histogram(img, bins=nbins, range=imgr)[0].astype(np.float32)

    import pylab as plt
    plt.figure(1)
    plt.plot(refh)

    plt.figure(2)
    plt.plot(imgh)
    plt.show()

    def fn(x, offs, div):
        return (x - offs) / div

    params, fitCovariances = curve_fit(fn, refh, imgh, p0=(0, 1))
    perr = np.sqrt(np.diag(fitCovariances))
    print('error scaling to reference image: %s' % perr[0])
    # if perr[0] < 0.1:
    return params[0], params[1]
Project: dataArtist    Author: radjkarl    | Project source | File source
def run(self):
        try:
            out = self.runfn()
        except (cv2.error, Exception, AssertionError) as e:
            if type(e) is cv2.error:
                print(e)
            self.progressBar.cancel.click()
            self._exc_info = sys.exc_info()
            return

        self.sigDone.emit(out)
Project: histonets-cv    Author: sul-cidr    | Project source | File source
def match(image, templates, threshold, flip, exclude_regions):
    """Look for TEMPLATES in IMAGE and return the bounding boxes of
    the matches. Options may be provided after each TEMPLATE.

    Example::

      histonets match http://foo.bar/tmpl1 -th 50 http://foo.bar/tmpl2 -th 95

    \b
    - TEMPLATE is a path to a local (file://) or remote (http://, https://)
      image file of the template to look for."""
    # TODO: Click invoke fails at testing time, but not at runtime :(
    #       template options should be a list of the same length that templates
    none_list = [None] * len(templates)
    args = (
        Image.get_images(templates),  # pipeline does not invoke the decorator
        threshold or none_list,
        flip or none_list,
        exclude_regions or none_list,
    )
    if len(set(len(x) for x in args)) != 1:
        raise click.BadParameter('Some templates or options are missing.')
    image_templates = []
    for (template_image, template_threshold, template_flip,
         template_exclude_regions) in zip(*args):
        mask = None
        if template_exclude_regions:
            try:
                mask = ~get_mask_polygons(template_exclude_regions,
                                          *template_image.image.shape[:2])
            except cv2.error:
                raise click.BadParameter('Polygons JSON is malformed.')
        image_templates.append({
            'image': template_image.image,
            'threshold': template_threshold,
            'flip': template_flip,
            'mask': mask,
        })
    matches = match_templates(image, image_templates)
    return matches.tolist()
Project: benchmark-keras    Author: beeva-ricardoguerrero    | Project source | File source
def filter_corrupted_images(path2dataset, prefix, path2filtereddataset):

    filtered_paths = []

    with open(path2dataset, 'rb') as fin:
        paths = fin.readlines()

    num_total_paths = len(paths)

    for num, line in enumerate(paths):
        path, label = line.strip().split()

        if os.path.exists(prefix + path):
            try:
                image = cv2.imread(prefix + path)
                _ = cv2.resize(image, (image.shape[1], image.shape[0]),
                                   interpolation=cv2.INTER_AREA)  # Some images are corrupted in a way that imread does not throw any exception.
                                                                  # Doing a small operation on it, will uncover the misbehaviour
                filtered_paths.append(line)

            except cv2.error:
                print("Exception catched. The image in path %s can't be read. Could be corrupted\n" % path)

        else:
            print("There is no image in %s" % path)

        if num % 100 == 0 and num != 0:
            print("Processed 100 more images.. (%d/%d)\n" % (num, num_total_paths))

    print("Total correct images: %d", len(filtered_paths))

    with open(path2filtereddataset, 'wb') as fout:
        fout.writelines(filtered_paths)
Project: benchmark-keras    Author: beeva-ricardoguerrero    | Project source | File source
def preprocess_images_worker(line, prefix_orig, prefix_dest, img_rows, img_cols, img_crop_rows, img_crop_cols):
    """
    Meant to be called by preprocess_images_multiprocess.
    A nested function (which would avoid copying the parameters) cannot be used with multiprocessing, hence this function is defined at module level.
    """

    path, label = line.strip().split()

    if os.path.exists(prefix_orig + path):
        try:
            image = cv2.imread(prefix_orig + path)
            image = cv2.resize(image, (img_rows, img_rows),
                               interpolation=cv2.INTER_AREA)  # Resize in create_caffenet.sh

        except cv2.error:
            print("Exception catched. The image in path %s can't be read. Could be corrupted\n" % path)
            return ""

        if img_crop_rows != 0 and img_crop_rows != img_rows:  # We need to crop rows
            crop_rows = img_rows - img_crop_rows
            crop_rows_pre, crop_rows_post = int(mt.ceil(crop_rows / 2.0)), int(mt.floor(crop_rows / 2.0))
            image = image[crop_rows_pre:-crop_rows_post, :]

        if img_crop_cols != 0 and img_crop_cols != img_cols:  # We need to crop cols
            crop_cols = img_cols - img_crop_cols
            crop_cols_pre, crop_cols_post = int(mt.ceil(crop_cols / 2.0)), int(mt.floor(crop_cols / 2.0))
            image = image[:, crop_cols_pre:-crop_cols_post]  # Crop in train_val.prototxt

        # Store the image as a .npy array (numpy binary format)
        npy_path = prefix_dest + path.split(".")[0] + ".npy"
        with open(npy_path, "wb") as fout:
            np.save(fout, image)

        return line.replace("JPEG", "npy")

    else:
        print("There is no image in %s" % path)
        return ""
Project: benchmark-keras    Author: beeva-ricardoguerrero    | Project source | File source
def preprocess_images(path2dataset_orig, prefix_orig, path2dataset_dest, prefix_dest, img_rows, img_cols, img_crop_rows, img_crop_cols):
    # Source path      = prefix_orig + path -> /mnt/img/img393.JPEG
    # Destination path = prefix_dest + path -> /mnt/h5/img393.npy

    processed_paths = []

    with open(path2dataset_orig, 'rb') as fin:
        paths = fin.readlines()

    num_total_paths = len(paths)

    for num, line in enumerate(paths):
        path, label = line.strip().split()

        if os.path.exists(prefix_orig + path):
            try:
                image = cv2.imread(prefix_orig + path)
                image = cv2.resize(image, (img_rows, img_rows),
                                   interpolation=cv2.INTER_AREA)  # Resize in create_caffenet.sh

            except cv2.error:
                print("Exception catched. The image in path %s can't be read. Could be corrupted\n" % path)
                continue

            if img_crop_rows != 0 and img_crop_rows != img_rows:  # We need to crop rows
                crop_rows = img_rows - img_crop_rows
                crop_rows_pre, crop_rows_post = int(mt.ceil(crop_rows / 2.0)), int(mt.floor(crop_rows / 2.0))
                image = image[crop_rows_pre:-crop_rows_post, :]

            if img_crop_cols != 0 and img_crop_cols != img_cols:  # We need to crop cols
                crop_cols = img_cols - img_crop_cols
                crop_cols_pre, crop_cols_post = int(mt.ceil(crop_cols / 2.0)), int(mt.floor(crop_cols / 2.0))
                image = image[:, crop_cols_pre:-crop_cols_post]  # Crop in train_val.prototxt

            # Store the image as a .npy array (numpy binary format)
            npy_path = prefix_dest + path.split(".")[0] + ".npy"
            with open(npy_path, "wb") as fout:
                np.save(fout, image)

            processed_paths.append(line.replace("JPEG", "npy"))

        else:
            print("There is no image in %s" % path)

        if num % 100 == 0 and num != 0:
            print("Pre-processed 100 more images.. (%d/%d)\n" % (num, num_total_paths))

    with open(path2dataset_dest, "wb") as fout:
        fout.writelines(processed_paths)

    print("Total images pre-processed: %d (remember that corrupted or not present images were discarded)" % len(processed_paths))
Project: benchmark-keras    Author: beeva-ricardoguerrero    | Project source | File source
def load_img_as_4Dtensor(path2dataset, prefix, img_rows, img_cols, img_crop_rows, img_crop_cols):
    """

    :return:
    """

    x_ls = []
    y_ls = []

    with open(path2dataset, 'rb') as fin:
        paths = fin.readlines()

    num_total_paths = len(paths)

    for num, line in enumerate(paths):
        path, label = line.strip().split()

        if os.path.exists(prefix + path):
            try:
                image = cv2.imread(prefix + path)
                image = cv2.resize(image, (img_rows, img_cols),
                                   interpolation=cv2.INTER_AREA)  # Resize in create_caffenet.sh

                if img_crop_rows != 0 and img_crop_rows != img_rows: # We need to crop rows
                    crop_rows = img_rows - img_crop_rows
                    crop_rows_pre, crop_rows_post = int(mt.ceil(crop_rows / 2.0)), int(mt.floor(crop_rows / 2.0))
                    image = image[crop_rows_pre:-crop_rows_post, :]

                if img_crop_cols != 0 and img_crop_cols != img_cols:  # We need to crop cols
                    crop_cols = img_cols - img_crop_cols
                    crop_cols_pre, crop_cols_post = int(mt.ceil(crop_cols / 2.0)), int(mt.floor(crop_cols / 2.0))
                    image = image[:, crop_cols_pre:-crop_cols_post]  # Crop in train_val.prototxt

                x_ls.append(image)
                y_ls.append(int(label))
            except cv2.error:
                print("Exception catched. The image in path %s can't be read. Could be corrupted\n" % path)
        else:
            print("There is no image in %s" % path)

        if num % 100 == 0 and num != 0:
            print("Loaded 100 more images.. (%d/%d)\n" % (num, num_total_paths))


    print("Total images loaded: %d (remember that corrupted or not present images were discarded)" % len(x_ls))

    x_np = np.array(x_ls)
    y_np = np.array(y_ls)

    return x_np, y_np
Project: FaceRecognition    Author: fonfonx    | Project source | File source
def create_dictionaries_from_db(repo, train_size, test_size, verbose=True):
    """ Create training and testing sets from a database with a fixed number of images in both sets """

    train_images = []
    test_images = []
    name_labels = {}
    directories = sorted(listdir(repo))
    label = 0
    print "Processing images ..."
    for d in directories:
        images = sorted(listdir(repo + d))
        shuffle(images)
        if len(images) >= 10:  # in the paper we consider only these images - can be replaced by train_size + test_size
            nb_img = 0
            i = 0
            while nb_img < train_size + test_size and i < len(images):
                path_image = repo + d + "/" + images[i]
                i += 1
                try:
                    if nb_img < train_size:
                        train_images.append(column_from_image(path_image, verbose))
                    else:
                        test_images.append(column_from_image(path_image, verbose))
                    nb_img += 1
                except (cv2.error, TypeError, ValueError) as e:
                    print "error image " + path_image + " " + str(e)
            if nb_img < train_size + test_size:
                print "Removing " + d
                if nb_img <= train_size and nb_img > 0:
                    del train_images[-nb_img:]
                elif nb_img > 0:
                    del train_images[-train_size:]
                    del test_images[-(nb_img - train_size):]
            else:
                label += 1
                name_labels[label] = d

    train_set = (np.column_stack(train_images)).astype(float)
    test_set = (np.column_stack(test_images)).astype(float)

    print "Training and Test sets have been created with success!"
    print "There are " + str(label) + " classes"

    return train_set, test_set, label, name_labels