Python cv2 module: boxPoints() example source code

We have extracted the following 21 code examples from open-source Python projects to illustrate how to use cv2.boxPoints().
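
Before the project examples, here is a minimal standalone sketch of the basic pattern (the input file name 'example.png' is a placeholder, not taken from any project below): cv2.minAreaRect() returns a rotated rectangle as ((cx, cy), (w, h), angle), and cv2.boxPoints() converts it into the four corner points used for drawing or further geometry.

import cv2
import numpy as np

# Minimal sketch: draw the minimum-area (rotated) bounding box of the largest
# contour found in a thresholded image.
img = cv2.imread('example.png')            # placeholder input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# [-2] picks the contour list under both the 2-tuple and 3-tuple return
# forms of cv2.findContours (OpenCV 2/4 vs. OpenCV 3)
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
largest = max(contours, key=cv2.contourArea)
rect = cv2.minAreaRect(largest)            # ((cx, cy), (w, h), angle)
box = cv2.boxPoints(rect)                  # 4x2 float array of corner points
box = np.int0(box)                         # round to integer pixel coordinates
cv2.drawContours(img, [box], 0, (0, 255, 0), 2)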

Project: dust_repos    Author: taozhijiang
def img_contour_select(ctrs, im):
    # collect candidate rectangular regions from the contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            if rect[1][0] < 20 or rect[1][1] < 20:
                continue
            if rect[1][0] > 150 or rect[1][1] > 150:
                continue        
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("contour selection", im)
    return cand_rect
Project: dust_repos    Author: taozhijiang
def img_contour_select(ctrs, im):
    # collect candidate rectangular regions from the contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            # filter out rectangles by rotation angle
            if rect[2] < -10 and rect[2] > -80:
                continue
            if rect[1][0] < 10 or rect[1][1] < 10:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("contour selection", im)
    return cand_rect
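
Both dust_repos snippets above call img_show_hook(), a debug display helper that is not part of the excerpts. A minimal stand-in, an assumption rather than the project's actual implementation, could look like this:

def img_show_hook(title, img):
    # assumed debug helper: show the intermediate image and wait for a keypress
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()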
Project: idmatch    Author: maddevsio
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)

    return np.minimum(c_im, ary)
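
remove_border() also depends on angle_from_right(), which is not included in the excerpt. A hedged sketch, assuming it measures how far an angle is from the nearest multiple of 90 degrees, would be:

def angle_from_right(deg):
    # assumed helper: angular distance (in degrees) to the nearest multiple of 90
    return min(deg % 90, 90 - deg % 90)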
Project: dataArtist    Author: radjkarl
def getMask(self, shape):

        p = self.state['pos']
        s = self.state['size']
        center = p + s / 2
        a = self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr1 = np.zeros(shape, dtype=np.uint8)
        arr2 = np.zeros(shape, dtype=np.uint8)

        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr1, [vertices], 0, color=1, thickness=-1)
        # draw ellipse:
        cv2.ellipse(arr2, (int(center[0]), int(center[1])), (int(s[0] / 2 * self._ratioEllispeRectangle),
                     int(s[1] / 2 * self._ratioEllispeRectangle)), int(a),
                    startAngle=0, endAngle=360, color=1, thickness=-1)
        # bring both together:
        return np.logical_and(arr1, arr2).T
Project: dataArtist    Author: radjkarl
def getMask(self, shape):

        p = self.state['pos']
        s = self.state['size']
        center = p + s / 2
        a = self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr = np.zeros(shape, dtype=np.uint8)
        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr, [vertices],
                         0,
                         color=1,
                         thickness=-1)
        return arr.astype(bool).T
Project: FindYourCandy    Author: BrainPad
def _bounding_box_of(contour):
    rotbox = cv2.minAreaRect(contour)
    coords = cv2.boxPoints(rotbox)

    xrank = np.argsort(coords[:, 0])

    left = coords[xrank[:2], :]
    yrank = np.argsort(left[:, 1])
    left = left[yrank, :]

    right = coords[xrank[2:], :]
    yrank = np.argsort(right[:, 1])
    right = right[yrank, :]

    #            top-left,       top-right,       bottom-right,    bottom-left
    box_coords = tuple(left[0]), tuple(right[0]), tuple(right[1]), tuple(left[1])
    box_dims = rotbox[1]
    box_centroid = int((left[0][0] + right[1][0]) / 2.0), int((left[0][1] + right[1][1]) / 2.0)

    return box_coords, box_dims, box_centroid
Project: Recognition    Author: thautwarm
def deal(self,frame):
        frame=frame.copy()
        track_window=self.track_window
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        roi_hist=self.roi_hist 
        dst = cv2.calcBackProject([frame],[0],roi_hist,[0,180],1)
        if self.m=='m':
            ret, track_window_r = cv2.meanShift(dst, track_window, term_crit)
            x,y,w,h = track_window_r
            img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        elif self.m=='c':
            ret, track_window_r = cv2.CamShift(dst, track_window, term_crit)


            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            img2 = cv2.polylines(frame,[pts],True, 255,2)
        rectsNew=[]

        center1=(track_window[0]+track_window[2]//2,track_window[1]+track_window[3]//2)
        center2=(track_window_r[0]+track_window_r[2]//2,track_window_r[1]+track_window_r[3]//2)
        img2 = cv2.line(img2,center1,center2,color=0)
        rectsNew=track_window_r
#        x,y,w,h = track_window
#        img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
        cv2.imshow('img2',img2)
        cv2.waitKey(0) 
        cv2.destroyAllWindows()
        return rectsNew
Project: Robo-Plot    Author: JackBuck
def _estimate_current_anticlockwise_degrees_using_minarearect(self, spot_xy) -> float:
        # Find the minimum area rectangle around the number
        nearby_contour_groups = contour_tools.extract_contour_groups_close_to(
            self.contour_groups, target_point_xy=spot_xy, delta=self._min_pixels_between_contour_groups)
        nearby_contours = [c for grp in nearby_contour_groups for c in grp]
        box = cv2.minAreaRect(np.row_stack(nearby_contours))
        corners_xy = cv2.boxPoints(box).astype(np.int32)
        self._log_contours_on_current_image([corners_xy], name="Minimum area rectangle")

        # Construct a vector which, once correctly rotated, goes from the bottom right corner up & left at 135 degrees
        sorted_corners = sorted(corners_xy, key=lambda pt: np.linalg.norm(spot_xy - pt))
        bottom_right_corner = sorted_corners[0]  # The closest corner to the spot
        adjacent_corners = sorted_corners[1:3]  # The next two closest corners

        unit_vectors_along_box_edge = misc.normalised(adjacent_corners - bottom_right_corner)
        up_left_diagonal = unit_vectors_along_box_edge.sum(axis=0)

        degrees_of_up_left_diagonal = np.rad2deg(np.arctan2(-up_left_diagonal[1], up_left_diagonal[0]))
        return degrees_of_up_left_diagonal - 135
Project: Millennium-Eye    Author: Elysium1937
def shapeFiltering(img):
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    if len(contours) == 0:
        return "yoopsie"
    #else:
        #print contours
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagiae", blank_image)
    cv2.waitKey()"""
    good_shape = []
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        """rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        w = """
        #if h == 0:
        #    continue
        ratio = w / h
        ratio_grade = ratio / (TMw / TMh)
        if 0.2 < ratio_grade < 1.8:
            good_shape.append(c)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, good_shape, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    return good_shape
Project: Millennium-Eye    Author: Elysium1937
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5)
    dst = cv2.warpAffine(blank_image,M,(cols,rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2

    return close_height_px, far_height_px
Project: Millennium-Eye    Author: Elysium1937
def findCorners(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2

    return close_height_px, far_height_px
Project: DeepTextSpotter    Author: MichalBusta
def draw_detections(img, detections, color = (0, 255, 0)):

    for i in range(0, detections.shape[2]):
        det_word = detections[0, 0, i]
        if (det_word[0] == 0 and det_word[1] == 0) or det_word[5] < 0.05:
            break


        box  = ((det_word[0], det_word[1]), (det_word[2], det_word[3]), det_word[4] * 180 / 3.14)
        box = cv2.boxPoints(box)
        box = np.array(box, dtype="int")

        draw_box_points(img, box, color)
Project: DeepTextSpotter    Author: MichalBusta
def get_normalized_image(img, rr, debug = False):

  box = cv2.boxPoints(rr)
  extbox = cv2.boundingRect(box)

  if extbox[2] *  extbox[3] > img.shape[0] * img.shape[1]:
    print("Too big proposal: {0}x{1}".format(extbox[2], extbox[3]))
    return None, None
  extbox = [extbox[0], extbox[1], extbox[2], extbox[3]]
  extbox[2] += extbox[0]
  extbox[3] += extbox[1]
  extbox = np.array(extbox, np.int)

  extbox[0] = max(0, extbox[0])
  extbox[1] = max(0, extbox[1])
  extbox[2] = min(img.shape[1], extbox[2])
  extbox[3] = min(img.shape[0], extbox[3])

  tmp = img[extbox[1]:extbox[3], extbox[0]:extbox[2]]
  center = (tmp.shape[1] / 2,  tmp.shape[0] / 2)
  rot_mat = cv2.getRotationMatrix2D( center, rr[2], 1 )

  if tmp.shape[0] == 0 or tmp.shape[1] == 0:
    return None, rot_mat

  if debug:
    vis.draw_box_points(img,  np.array(extbox, dtype="int"), color = (0, 255, 0))
    cv2.imshow('scaled', img)

  rot_mat[0,2] += rr[1][0] /2.0 - center[0]
  rot_mat[1,2] += rr[1][1] /2.0 - center[1]
  try:
    norm_line = cv2.warpAffine( tmp, rot_mat, (int(rr[1][0]), int(rr[1][1])), borderMode=cv2.BORDER_REPLICATE )
  except:
    return None, rot_mat
  return norm_line, rot_mat
Project: DeepTextSpotter    Author: MichalBusta
def get_obox(im, scaled, box):

  image_size = (scaled.shape[1], scaled.shape[0])

  o_size = (im.shape[1], im.shape[0])
  scalex = o_size[0] / float(image_size[0])
  scaley = o_size[1] / float(image_size[1])

  box2 = np.copy(box)

  gtbox  = ((box[0][0], box[0][1]), (box[1][0], box[1][1]), box[2])
  gtbox = cv2.boxPoints(gtbox)
  gtbox = np.array(gtbox, dtype="float")

  #vis.draw_box_points(im,  np.array(gtbox, dtype="int"), color = (0, 255, 0))
  #cv2.imshow('orig', im)

  gtbox[:,0] /=  scalex
  gtbox[:,1] /=  scaley

  dh = gtbox[0, :] - gtbox[1, :]
  dw = gtbox[1, :] - gtbox[2, :]


  h = math.sqrt(dh[0] * dh[0] + dh[1] * dh[1])
  w = math.sqrt(dw[0] * dw[0] + dw[1] * dw[1])

  box2[0][0] /=  scalex
  box2[0][1] /=  scaley

  box2[1][0] = w
  box2[1][1] = h

  box2[2] = math.atan2((gtbox[2, 1] - gtbox[1, 1]), (gtbox[2, 0] - gtbox[1, 0])) * 180 / 3.14

  return box2
Project: omr    Author: rbaron
def get_bounding_rect(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    return np.int0(box)
Project: pyANPD    Author: abdulfatir
def validate_contour(contour, img, aspect_ratio_range, area_range):
    rect = cv2.minAreaRect(contour)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box = cv2.boxPoints(rect) 
    box = np.int0(box)

    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2] 
    width = rect[1][0]
    height = rect[1][1]

    angle = (angle + 180) if width < height else (angle + 90)

    output=False

    if (width > 0 and height > 0) and ((width < img_width/2.0) and (height < img_width/2.0)):
        aspect_ratio = float(width)/height if width > height else float(height)/width
        if (aspect_ratio >= aspect_ratio_range[0] and aspect_ratio <= aspect_ratio_range[1]):
            if((height*width > area_range[0]) and (height*width < area_range[1])):

                box_copy = list(box)
                point = box_copy[0]
                del(box_copy[0])
                dists = [((p[0]-point[0])**2 + (p[1]-point[1])**2) for p in box_copy]
                sorted_dists = sorted(dists)
                opposite_point = box_copy[dists.index(sorted_dists[1])]
                tmp_angle = 90

                if abs(point[0]-opposite_point[0]) > 0:
                    tmp_angle = abs(float(point[1]-opposite_point[1]))/abs(point[0]-opposite_point[0])
                    tmp_angle = rad_to_deg(math.atan(tmp_angle))

                if tmp_angle <= 45:
                    output = True
    return output
Project: DeepTextSpotter    Author: MichalBusta
def transform_boxes(self, im, scaled, word_gto):

      image_size = (scaled.shape[1], scaled.shape[0])

      o_size = (im.shape[1], im.shape[0])
      normo = math.sqrt(im.shape[1] * im.shape[1] + im.shape[0] * im.shape[0] )
      normo2 = math.sqrt(image_size[1] * image_size[1] + image_size[0] * image_size[0] )
      scalex = o_size[0] / float(image_size[0])
      scaley = o_size[1] / float(image_size[1])

      gto_out = []
      for gt_no in range(len(word_gto)):
          gt = word_gto[gt_no]
          gtbox  = ((gt[0] * o_size[0], gt[1] * o_size[1]), (gt[2] * normo, gt[3] * normo), gt[4] * 180 / 3.14)
          gtbox = cv2.boxPoints(gtbox)
          gtbox = np.array(gtbox, dtype="float")

          #draw_box_points(im,  np.array(gtbox, dtype="int"), color = (0, 255, 0))
          #cv2.imshow('orig', im)

          gtbox[:,0] /=  scalex
          gtbox[:,1] /=  scaley

          dh = gtbox[0, :] - gtbox[1, :]
          dw = gtbox[1, :] - gtbox[2, :]


          h = math.sqrt(dh[0] * dh[0] + dh[1] * dh[1]) / normo2
          w = math.sqrt(dw[0] * dw[0] + dw[1] * dw[1]) / normo2

          #if w * normo2 < 5 or h * normo2  < 5 or np.isinf(w) or np.isinf(h):
              #print("warn: removig too small gt {0}".format(gt))
          #    continue

          gt[2] = w
          gt[3] = h

          gt[4] = math.atan2((gtbox[2, 1] - gtbox[1, 1]), (gtbox[2, 0] - gtbox[1, 0]))

          gt[8] = gtbox
          if self.debug:
            print('gtbox : ' + str(gtbox))
          gto_out.append(gt)
          #draw_box_points(scaled,  np.array(gtbox, dtype="int"), color = (0, 255, 0))
          #cv2.imshow('scaled', scaled)
          #cv2.waitKey(0)

      return gto_out
Project: srcsim2017    Author: ZarjRobotics
def _find_a_thing(self, c, min_height, max_height, min_width, max_width, max_distance, debug_img=None):
        rect = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(rect) if is_cv2() else cv2.boxPoints(rect)

        top,bottom,left,right,center = self.find_dimensions(np.int0(np.array(box)))

        if top is None or left is None or center is None:
            return None

        vertical = self.find_distance(top, bottom)
        horizontal = self.find_distance(left, right)
        away = self.find_distance(center, None)

        if vertical > horizontal:
            height = vertical
            width = horizontal
            flipped = False
        else:
            height = horizontal
            width = vertical
            flipped = True

        if height < min_height or height > max_height:
            return None

        if width < min_width or width > max_width:
            return None

        if away > max_distance:
            return None

        # This page was helpful in understanding angle
        # https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
        angle = rect[2]
        if rect[1][0] < rect[1][1]:
            angle -= 90.0

        if debug_img is not None:
            x,y,w,h = cv2.boundingRect(c)
            cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
            cv2.drawContours(debug_img, [np.int0(np.array(box))], -1, (0, 0, 255), 2)
            cv2.rectangle(debug_img,(x,y),(x+w,y+h),(255,0,0),2)

            cv2.circle(debug_img, top, 5, (255, 255, 0))
            cv2.circle(debug_img, bottom, 5, (255, 255, 0))
            cv2.circle(debug_img, left, 5, (255, 255, 0))
            cv2.circle(debug_img, right, 5, (255, 255, 0))
            cv2.circle(debug_img, center, 5, (255, 255, 0))


        return Thing(height, width, center, angle)
Project: Stronghold-2016-Vision    Author: team4099
def get_contours(orig_image):
    """
    Get edge points (hopefully corners) from the given opencv image (called
    contours in opencv)

    Parameters:
        :param: `orig_image` - the thresholded image from which to find contours
    """
    new_image = numpy.copy(orig_image)
    # cv2.imshow("Vision", new_image)
    # cv2.waitKey(1000)
    new_image, contours, hierarchy = cv2.findContours(new_image,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)
    # print(len(contours))
    # print(len(contours[0]))
    # print(len(contours[0][0]))
    # print(len(contours[0][0][0]))
    largest_contour = 0
    most_matching = 0
    min_score = 0
    max_area = 0
    if len(contours) > 1:
        print("Length of contours:", len(contours))
        max_area = cv2.contourArea(contours[0])
        min_score = average_goal_matching(contours[0])
        for i in range(1, len(contours)):
            # print(contours[i])
            current_score = average_goal_matching(contours[i])
            current_area = cv2.contourArea(contours[i])
            if current_area > max_area:
                max_area = current_area
                largest_contour = i
            if current_score < min_score and current_score != 0 and current_area > 300 and current_area < 1500:
                min_score = current_score
                most_matching = i
    elif len(contours) == 0:
        raise GoalNotFoundException("Goal not found!")
    if min_score >= 9999999999999999:
        raise GoalNotFoundException("Goal not found!")
    print("largest_contour:", largest_contour)
    print("Area:", max_area)
    # print("largest_contour:", largest_contour)
    print("Most matching:", most_matching)
    print("Score:", min_score)
    print("Area of most matching:", cv2.contourArea(contours[most_matching]))

    rect = cv2.minAreaRect(contours[most_matching])
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    # print(box)
    return numpy.array(contours[most_matching]), box
Project: 2017-Vision    Author: RoboticsTeam4904
def filterContoursFancy(contours, image=None):
    if len(contours) == 0:
        return []

    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])

    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)

    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]

    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour,poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]

    rectangularScores = np.divide(rectangularScores, widths)

    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)

    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]

    if config.extra_debug:
        print "size, ratio, rotation, rectangular, area, quad"
        print "Weights:", weights
        print "Scores: ", contourScores
        print np.average(scores, axis=1)
        if len(incorrectInds) != 0:
            print "AVG, WORST", test(scores, correctInds, incorrectInds)
        for i in range(numContours):
            print "CONTOUR " + str(i)
            print np.multiply(scores[:, i], weights) #newWeights
            print contourScores[i]
            if image is not None:
                img = copy.deepcopy(image)
                Printing.drawImage(img, contours[:i] + contours[i+1:], contours[i], False)
                Printing.display(img, "contour " + str(i), doResize=True)
            cv2.waitKey(0)
        cv2.destroyAllWindows()
    return correctContours
Project: 2017-Vision    Author: RoboticsTeam4904
def filterContoursAutocalibrate(contours, image=None):
    if len(contours) == 0:
        return []

    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])

    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)

    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]

    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour,poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]

    rectangularScores = np.divide(rectangularScores, widths)

    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)

    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]

    averageScore = 0
    for i in range(numContours):
        averageScore += sizeScores[i]
        averageScore +=  ratioScores[i]
        averageScore +=  rotationScores[i]
        averageScore +=  rectangularScores[i]
        averageScore +=  areaScores[i]
        averageScore +=  quadScores[i]
    averageScore /= numContours
    return averageScore