Python cv2 module: getPerspectiveTransform() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.getPerspectiveTransform().
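
As a quick orientation before the examples: the common pattern is to pick four source points and four destination points (each a 4x2 float32 array), compute the 3x3 homography with cv2.getPerspectiveTransform(), and apply it with cv2.warpPerspective(). A minimal self-contained sketch (the file name and point values are illustrative only):

import cv2
import numpy as np

img = cv2.imread('input.jpg')  # hypothetical input file

# four corners in the source image: top-left, top-right, bottom-right, bottom-left
src = np.float32([[100, 100], [500, 120], [520, 400], [80, 380]])
# where those corners should land in the output image
dst = np.float32([[0, 0], [400, 0], [400, 300], [0, 300]])

M = cv2.getPerspectiveTransform(src, dst)  # 3x3 matrix; requires exactly 4 point pairs
warped = cv2.warpPerspective(img, M, (400, 300))  # dsize is (width, height)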

Project: diy_driverless_car_ROS | Author: wilselby
def render_lane(image, corners, ploty, fitx):

    _, src, dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx,ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp, [pts], False, (0, 255, 0), 10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result
Project: SDcarsLaneDetection | Author: Nazanin1369
def corners_unwarp(img, nx, ny, mtx, dist):
    # Initialize defaults so warped and M are defined even if no corners are found
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: SelfDrivingCar | Author: aguijarro
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: esys-pbi | Author: fsxfreak
def m_screen_to_marker(marker):
    #verts need to be sorted counterclockwise starting at bottom left
    #marker coord system:
    # +-----------+
    # |0,1     1,1|  ^
    # |           | / \
    # |           |  |  UP
    # |0,0     1,0|  |
    # +-----------+
    mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32)
    return cv2.getPerspectiveTransform(np.array(marker['verts'],dtype=np.float32),mapped_space_one)





#persistent vars for detect_markers_robust
Project: idmatch | Author: maddevsio
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32"
    )
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
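
Several four_point_transform variants in this collection call an order_points() helper that is not included in the snippets. A plausible implementation (a sketch only, using the same sum/diff trick that normalize_contrs applies further down) orders the points as top-left, top-right, bottom-right, bottom-left:

def order_points(pts):
    # order a 4x2 point array as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]  # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]  # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]  # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]  # bottom-left has the largest y - x
    return rect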
Project: idmatch | Author: maddevsio
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
Project: omr | Author: rbaron
def perspective_transform(img, points):
    """Transform img so that points are the new corners"""

    source = np.array(
        points,
        dtype="float32")

    dest = np.array([
        [TRANSF_SIZE, TRANSF_SIZE],
        [0, TRANSF_SIZE],
        [0, 0],
        [TRANSF_SIZE, 0]],
        dtype="float32")

    img_dest = img.copy()
    transf = cv2.getPerspectiveTransform(source, dest)
    warped = cv2.warpPerspective(img, transf, (TRANSF_SIZE, TRANSF_SIZE))
    return warped
Project: diy_driverless_car_ROS | Author: wilselby
def perspective_transform(image, corners, debug=False, xoffset=0):

    height, width = image.shape[0:2]
    output_size = height/2

    new_top_left = np.array([corners[0, 0], 0])
    new_top_right = np.array([corners[3, 0], 0])
    offset = [xoffset, 0]
    img_size = (image.shape[1], image.shape[0])
    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([corners[0]+offset, new_top_left+offset, new_top_right-offset, corners[3]-offset])

    M = cv2.getPerspectiveTransform(src, dst)

    warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

    if debug:
        drawQuad(image, src, [255, 0, 0])
        drawQuad(warped, dst, [255, 255, 0])
        plt.imshow(image)
        plt.show()
        plt.imshow(warped)
        plt.show()

    return warped, src, dst
Project: diy_driverless_car_ROS | Author: wilselby
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
        height, width = image.shape[0:2]
        output_size = height/2

        #src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
        src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
        dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
        #dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])

        M = cv2.getPerspectiveTransform(src, dst)
        print(M)
        warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

        if debug:
            self.drawQuad(image, src, [255, 0, 0])
            self.drawQuad(image, dst, [255, 255, 0])
            plt.imshow(image)
            plt.show()

        return warped
Project: crnn_tf | Author: liuhu-bigeye
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)

    # perspective translation
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)

    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    # color deviation
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]

    return ims_src, mat
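
A hedged usage sketch for im_augmentation, assuming weight and vec hold the eigenvalues and eigenvectors of the training set's RGB covariance (PCA-style colour augmentation); square images sidestep the W/H naming ambiguity inside the function:

ims = (np.random.rand(8, 64, 64, 3) * 255.).astype(np.float32)  # batch of float images
weight = np.array([0.2, 0.05, 0.01])  # assumed: RGB eigenvalues
vec = np.eye(3)                       # assumed: RGB eigenvectors (identity = per-channel jitter)
aug, mat = im_augmentation(ims, weight, vec, trans=0.1, color_dev=0.1)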
Project: crnn_tf | Author: liuhu-bigeye
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)

    # perspective translation
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)

    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    # color deviation
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]

    return ims_src, mat
Project: ego-lane-analysis-system | Author: rodrigoberriel
def apply_ipm(img, config, ys):
    # IPM
    y_top, y_bottom = min(ys), max(ys)
    ipm_pts = config['dataset']['ipm_points']
    roi = config['dataset']['region_of_interest']

    src = np.array([
        [ipm_pts['@top_left'], y_top],
        [ipm_pts['@top_right'], y_top],
        [ipm_pts['@bottom_right'], y_bottom],
        [ipm_pts['@bottom_left'], y_bottom],
    ], dtype="float32")

    dst = np.array([
        [ipm_pts['@top_left'], 0],
        [ipm_pts['@top_right'], 0],
        [ipm_pts['@top_right'], roi['@height']],
        [ipm_pts['@top_left'], roi['@height']],
    ], dtype="float32")

    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (roi['@width'], roi['@height']))
Project: Notes2ppt | Author: gsengupta2810
def four_point_transform(image, pts):

    rect = order_points(pts)
    (tl, tr, br, bl) = rect


    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    return warped
Project: kaggle_amazon_from_space | Author: N01Z3
def randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape

        angle = random.uniform(-rotate_limit, rotate_limit)  # degree
        scale = random.uniform(1 - scale_limit, 1 + scale_limit)
        dx = round(random.uniform(-shift_limit, shift_limit) * width)
        dy = round(random.uniform(-shift_limit, shift_limit) * height)

        cc = math.cos(angle / 180 * math.pi) * scale
        ss = math.sin(angle / 180 * math.pi) * scale
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        img = cv2.warpPerspective(img, mat, (width, height), flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REFLECT_101)  # alternative: cv2.BORDER_CONSTANT with borderValue=(0, 0, 0)

    return img
Project: answer-sheet-scan | Author: inuyasha2012
def detect_cnt_again(poly, base_img):
    """
    ???????????????????
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # ?????????????????flag
    flag = False

    # get the four corner points of the polygon, then crop the ROI and preprocess it
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)

    # find the contour with the largest area
    cnt = get_max_area_cnt(img)

    # if the contour covers a large enough share of the ROI, re-approximate it as a quadrilateral
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if not poly.shape[0] == 4:
            raise PolyNodeCountError

    # build the perspective transform from the detected corners to the full base-image rectangle
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                            [base_img.shape[1], 0],
                            [0, base_img.shape[0]],
                            [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)

    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp
Project: autonomous_driving | Author: StatueFungus
def _calculate_transformation_matrix(self):
        p1_w, p2_w, p3_w, p4_w = self._calculate_world_coordinates()

        rect = np.array([
            [0, self.horizon_y],
            [self.image_resolution[1] - 1, self.horizon_y],
            [self.image_resolution[1] - 1, self.image_resolution[0] - 1],
            [0, self.image_resolution[0] - 1]
        ], dtype="float32")

        p1_new, p2_new, p3_new, p4_new = self._calculate_destination_points(
            p1_w, p2_w, p3_w, p4_w)

        dst = np.array([
            [p1_new[0], p1_new[1]],
            [p2_new[0], p2_new[1]],
            [p3_new[0], p3_new[1]],
            [p4_new[0], p4_new[1]]
        ], dtype="float32")

        self.transformation_matrix = cv2.getPerspectiveTransform(rect, dst)
        self.transformated_image_resolution = (int(p2_new[0]), self.image_resolution[0])  # width: right-most point / height: height of the original image
Project: autonomous_driving | Author: StatueFungus
def _calculate_transformation_matrix(self):
        p1_w, p2_w, p3_w, p4_w = self._calculate_world_coordinates()

        rect = np.array([
            [0, self.horizon_y],
            [self.image_resolution[1] - 1, self.horizon_y],
            [self.image_resolution[1] - 1, self.image_resolution[0] - 1],
            [0, self.image_resolution[0] - 1]
        ], dtype="float32")

        p1_new, p2_new, p3_new, p4_new = self._calculate_destination_points(
            p1_w, p2_w, p3_w, p4_w)

        dst = np.array([
            [p1_new[0], p1_new[1]],
            [p2_new[0], p2_new[1]],
            [p3_new[0], p3_new[1]],
            [p4_new[0], p4_new[1]]
        ], dtype="float32")

        self.transformation_matrix = cv2.getPerspectiveTransform(rect, dst)
        self.transformated_image_resolution = (int(p2_new[0]), self.image_resolution[0])  # width: right-most point / height: height of the original image
Project: Vision2016 | Author: Team3309
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, and a new binary image to test against
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
Project: yonkoma2data | Author: esuji5
def transform_by4(self, img, points):
        points = sorted(points, key=lambda x: x[1])
        if len(points) == 4:
            top = sorted(points[:2], key=lambda x: x[0])
            bottom = sorted(points[2:], key=lambda x: x[0], reverse=True)
            points = np.array(top + bottom, dtype='float32')
        else:
            y_min, y_max = points[0][1], points[-1][1]
            points = sorted(points, key=lambda x: x[0])
            x_min, x_max = points[0][0], points[-1][0]
            points = np.array([np.array([x_min, y_min]),
                               np.array([x_max, y_min]),
                               np.array([x_max, y_max]),
                               np.array([x_min, y_max])],
                              np.float32)

        width = max(np.sqrt(((points[0][0] - points[2][0]) ** 2) * 2),
                    np.sqrt(((points[1][0] - points[3][0]) ** 2) * 2))
        height = max(np.sqrt(((points[0][1] - points[2][1]) ** 2) * 2),
                     np.sqrt(((points[1][1] - points[3][1]) ** 2) * 2))

        dst = np.array([np.array([0, 0]),
                        np.array([width - 1, 0]),
                        np.array([width - 1, height - 1]),
                        np.array([0, height - 1]),
                        ], np.float32)

        # compute the perspective transform and warp the image into the axis-aligned rectangle
        trans = cv2.getPerspectiveTransform(points, dst)
        return cv2.warpPerspective(img, trans, (int(width), int(height)))
Project: esys-pbi | Author: fsxfreak
def m_verts_to_screen(verts):
    #verts need to be sorted counter-clockwise starting at bottom left
    return cv2.getPerspectiveTransform(marker_corners_norm,verts)
Project: esys-pbi | Author: fsxfreak
def m_verts_from_screen(verts):
    #verts need to be sorted counter-clockwise starting at bottom left
    return cv2.getPerspectiveTransform(verts,marker_corners_norm)
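
Note that m_verts_to_screen and m_verts_from_screen reference a module-level marker_corners_norm that is not part of these snippets. Judging from the mapped_space_one arrays used elsewhere in the same project, it is presumably the unit square in counter-clockwise order starting at the bottom left (an assumption, not confirmed by the excerpt):

marker_corners_norm = np.array(((0, 0), (1, 0), (1, 1), (0, 1)), dtype=np.float32)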
Project: esys-pbi | Author: fsxfreak
def move_vertex(self,vert_idx,new_pos):
        """
        this fn is used to manipulate the surface boundary (coordinate system)
        new_pos is in uv-space coords
        if we move one vertex of the surface we need to find
        the tranformation from old quadrangle to new quardangle
        and apply that transformation to our marker uv-coords
        """
        before = marker_corners_norm
        after = before.copy()
        after[vert_idx] = new_pos
        transform = cv2.getPerspectiveTransform(after,before)
        for m in self.markers.values():
            m.uv_coords = cv2.perspectiveTransform(m.uv_coords,transform)
Project: esys-pbi | Author: fsxfreak
def m_marker_to_screen(marker):
    #verts need to be sorted counterclockwise starting at bottom left
    #marker coord system:
    # +-----------+
    # |0,1     1,1|  ^
    # |           | / \
    # |           |  |  UP
    # |0,0     1,0|  |
    # +-----------+
    mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32)
    return cv2.getPerspectiveTransform(mapped_space_one,np.array(marker['verts'],dtype=np.float32))
Project: piwall-cvtools | Author: infinnovation
def compute_warp(rect):    

    # now that we have our rectangle of points, let's compute
    # the width of our new image
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))

    # ...and now for the height of our new image
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))

    # take the maximum of the width and height values to reach
    # our final dimensions
    maxWidth = max(int(widthA), int(widthB))
    maxHeight = max(int(heightA), int(heightB))

    # construct our destination points which will be used to
    # map the screen to a top-down, "bird's eye" view
    dst = np.array([
    [0, 0],
    [maxWidth - 1, 0],
    [maxWidth - 1, maxHeight - 1],
    [0, maxHeight - 1]], dtype = "float32")

    # calculate the perspective transform matrix and warp
    # the perspective to grab the screen
    M = cv2.getPerspectiveTransform(rect, dst)

    return (maxWidth, maxHeight, dst, M)
Project: handfontgen | Author: nixeneko
def transform(image, rectpoints, dpmm):
    docpxls = (int(DOCSIZE[0]*dpmm),int(DOCSIZE[1]*dpmm))
    docrect = np.array(
                [(0,0), (docpxls[0], 0), (docpxls[0], docpxls[1]), (0, docpxls[1])],
                'float32')
    transmat = cv2.getPerspectiveTransform(np.array(rectpoints, 'float32'), docrect)
    return cv2.warpPerspective(image, transmat, docpxls)
Project: AutonomousParking | Author: jovanduy
def transform_img(self):
        """ Transform the top-down image of the arc so that it lays flat in a plane on our cv_image """
        if self.vel is not None and self.omega is not None:
            pts1 = np.float32([[0,0], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [IMG_WIDTH, 0]])
            pts2 = np.float32([[200,240], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [400, 240]])
            M = cv2.getPerspectiveTransform(pts1, pts2)
            self.transformed = cv2.warpPerspective(self.arc_image, M, (self.cv_image.shape[0], self.cv_image.shape[1]))
            rows, cols, channels = self.transformed.shape
            self.transformed = self.transformed[0:IMG_HEIGHT, 0: cols]
Project: guard-breaker | Author: JRJurman
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "bird's eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
Project: imgProcessor | Author: radjkarl
def simplePerspectiveTransform(img, quad, shape=None,
                               interpolation=cv2.INTER_LINEAR,
                               inverse=False):
    p = sortCorners(quad).astype(np.float32)
    if shape is not None:
        height, width = shape
    else:
        # get output image size from avg. quad edge length
        width = int(round(0.5 * (np.linalg.norm(p[0] - p[1]) +
                                 np.linalg.norm(p[3] - p[2]))))
        height = int(round(0.5 * (np.linalg.norm(p[1] - p[2]) +
                                  np.linalg.norm(p[0] - p[3]))))

    dst = np.float32([[0,     0],
                      [width, 0],
                      [width, height],
                      [0,     height]])

    if inverse:
        s0, s1 = img.shape[:2]
        dst /= ((width / s1), (height / s0))
        H = cv2.getPerspectiveTransform(dst, p)
    else:
        H = cv2.getPerspectiveTransform(p, dst)

    return cv2.warpPerspective(img, H, (width, height), flags=interpolation)
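
sortCorners() is assumed here to put the quad's corners into a consistent order (it is not shown in the snippet). A round-trip usage sketch with an illustrative quad:

quad = np.array([[120, 80], [520, 60], [540, 400], [100, 420]], dtype=np.float32)
flat = simplePerspectiveTransform(img, quad)  # rectify the quad to a flat rectangle
back = simplePerspectiveTransform(flat, quad, shape=img.shape[:2], inverse=True)  # paste it back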
Project: tefla | Author: openAGI
def __init__(self, src, dst):
        """Perspective and Inverse perspective transformer

        Args:
            src: Source coordinates for perspective transformation
            dst: Destination coordinates for perspective transformation
        """
        self.src = src
        self.dst = dst
        self.M = cv2.getPerspectiveTransform(src, dst)
        self.M_inv = cv2.getPerspectiveTransform(dst, src)
Project: tefla | Author: openAGI
def transform(self, img, offset=0):
        if offset == 0:
            return self.warp(img, self.M)
        else:
            src = self.src.copy()
            src[:, 0] = src[:, 0] + offset

            dst = self.dst.copy()
            dst[:, 0] = dst[:, 0] + offset

            M_inv = cv2.getPerspectiveTransform(src, dst)
            return self.warp(img, M_inv)
Project: tefla | Author: openAGI
def inverse_transform(self, img, offset=0):
        if offset == 0:
            return self.warp(img, self.M_inv)
        else:
            src = self.src.copy()
            src[:, 0] = src[:, 0] + offset

            dst = self.dst.copy()
            dst[:, 0] = dst[:, 0] + offset

            M_inv = cv2.getPerspectiveTransform(dst, src)
            return self.warp(img, M_inv)
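
A usage sketch for this transformer (the class name and the warp() helper are assumptions; warp() presumably wraps cv2.warpPerspective with the stored matrix, and img is any loaded image):

src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])  # illustrative road trapezoid
dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])
transformer = PerspectiveTransformer(src, dst)  # class name assumed
birdseye = cv2.warpPerspective(img, transformer.M, (1280, 720))
restored = cv2.warpPerspective(birdseye, transformer.M_inv, (1280, 720))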
Project: FindYourCandy | Author: BrainPad
def calibrate(self, img):
        corners = self.detect_corners(img)
        transform_matrix = cv2.getPerspectiveTransform(corners, self.corner_coords)
        return cv2.warpPerspective(img, transform_matrix, self.area)
Project: football-stats | Author: dev-labs-bg
def windowToFieldCoordinates(basePoint, x1, y1, x2, y2, x3, y3, x4, y4, maxWidth=0, maxHeight=0):
    (xp, yp) = basePoint
    src = np.array([
        [x1, y1],
        [x2, y2],
        [x3, y3],
        [x4, y4]], dtype = "float32")

    # those should be the same aspect as the real width/height of field
    maxWidth = (x4-x1) if maxWidth == 0 else maxWidth
    maxHeight = (y1-y2) if maxHeight == 0 else maxHeight

    # make a destination rectangle with the width and height of above (starts at 0,0)
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # find the transformation matrix for our transforms
    transformationMatrix = cv2.getPerspectiveTransform(src, dst)

    # put the original (source) x,y point in a 3-D float32 array, since cv2.perspectiveTransform expects shape (1, N, 2); repeating the point is harmless but not required
    original = np.array([((xp, yp), (xp, yp), (xp, yp))], dtype=np.float32)

    # use perspectiveTransform to transform our original(mouse coords) to new coords with the transformation matrix
    transformed = cv2.perspectiveTransform(original, transformationMatrix)[0][0]

    return transformed
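
For example, mapping a clicked pixel onto a 105x68 field (corner pixel values illustrative):

corners = (100, 600, 150, 100, 1100, 120, 1200, 620)  # the field's four corners in the image
pt = windowToFieldCoordinates((640, 360), *corners, maxWidth=105, maxHeight=68)
print(pt)  # (x, y) in field units, e.g. metres on a football pitch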
Project: mrflow | Author: jswulff
def old_get_perspective_transform_normalized(p1,p2):
    """
    A small wrapper around cv2.getPerspectiveTransform, with normalization of
    point locations.

    """

    # NOTE: normalization is disabled; this early return bypasses the code below.
    return cv2.getPerspectiveTransform(p1,p2)

    mu1 = p1.mean(axis=0)
    std1 = p1.std(axis=0)
    mu2 = p2.mean(axis=0)
    std2 = p2.std(axis=0)

    p1_ = (p1 - mu1) / std1
    p2_ = (p2 - mu2) / std2

    H_ = cv2.getPerspectiveTransform(p1_,p2_)
    A1 = np.array([[1.0/std1[0], 0.0, -mu1[0]/std1[0]],
                   [0, 1.0/std1[1], -mu1[1]/std1[1]],
                   [0,0,1.0]])
    A2inv = np.array([[std2[0], 0.0, mu2[0]],
                   [0, std2[1], mu2[1]],
                   [0,0,1.0]])
    H = A2inv.dot(H_).dot(A1)
    return H
Project: mrflow | Author: jswulff
def old_find_homography_normalized(p1,p2):
    """
    A small wrapper around cv2.getPerspectiveTransform, with normalization of
    point locations.

    """

    return cv2.findHomography(p1,p2,method=cv2.LMEDS)[0]

    mu1 = p1.mean(axis=0)
    std1 = p1.std(axis=0)
    mu2 = p2.mean(axis=0)
    std2 = p2.std(axis=0)

    p1_ = (p1 - mu1) / std1
    p2_ = (p2 - mu2) / std2

    H_ = cv2.findHomography(p1_,p2_,method=cv2.LMEDS)[0]
    A1 = np.array([[1.0/std1[0], 0.0, -mu1[0]/std1[0]],
                   [0, 1.0/std1[1], -mu1[1]/std1[1]],
                   [0,0,1.0]])
    A2inv = np.array([[std2[0], 0.0, mu2[0]],
                   [0, std2[1], mu2[1]],
                   [0,0,1.0]])
    H = A2inv.dot(H_).dot(A1)
    return H
Project: unet-tensorflow | Author: timctho
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                     borderValue=(
                                         0, 0,
                                         0,))

    return image, mask, mask_w
Project: unet-tensorflow | Author: timctho
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                     borderValue=(
                                         0, 0,
                                         0,))

    return image, mask, mask_w
Project: lane-detection-raspberry-pi | Author: uvbakutan
def trans_per(self, image):

        image = self.binary_extraction(image)

        self.binary_image = image

        ysize = image.shape[0]
        xsize = image.shape[1]

        # define region of interest
        left_bottom = (xsize/10, ysize)
        apex_l = (xsize/2 - 2600/(self.look_ahead**2),  ysize - self.look_ahead*275/30)
        apex_r = (xsize/2 + 2600/(self.look_ahead**2),  ysize - self.look_ahead*275/30)
        right_bottom = (xsize - xsize/10, ysize)

        # define vertices for perspective transformation
        src = np.array([[left_bottom], [apex_l], [apex_r], [right_bottom]], dtype=np.float32)
        dst = np.float32([[xsize/3,ysize],[xsize/4.5,0],[xsize-xsize/4.5,0],[xsize-xsize/3, ysize]])

        self.M = cv2.getPerspectiveTransform(src, dst)
        self.Minv = cv2.getPerspectiveTransform(dst, src)

        if len(image.shape) > 2:
            warped = cv2.warpPerspective(image, self.M, image.shape[-2:None:-1], flags=cv2.INTER_LINEAR)
        else:
            warped = cv2.warpPerspective(image, self.M, image.shape[-1:None:-1], flags=cv2.INTER_LINEAR)
        return warped

    # create window mask for lane detection
Project: Computer-Vision | Author: PratikRamdasi
def perspectiveTransform(self):
        folder=self.sort_files()
        P=self.get_points()
        self.height,self.width=cv2.imread("Frames/1.jpg").shape[:2]
        # Process frames  
        for i in folder:
            pic="Frames/"+str(i)+".jpg"
            img = cv2.imread(pic)
            pts1 = np.float32([[P[0][0],P[0][1]],[P[1][0],P[1][1]],[P[2][0],P[2][1]],[P[3][0],P[3][1]]])
            pts2 = np.float32([[P[0][2],P[0][3]],[P[1][2],P[1][3]],[P[2][2],P[2][3]],[P[3][2],P[3][3]]])
            M = cv2.getPerspectiveTransform(pts1,pts2)
            dst = cv2.warpPerspective(img,M,(self.width,self.height))
            cv2.imwrite("Frames/%d.jpg" % i, dst)

    # Get x,y co-ordinates
Project: Face_recog_LBPH | Author: vedvasu
def crop_out(img,x1,y1,x2,y2,b,h):
        '''
        This function is used to crop the image to the desired dimensions.
        img: image from which the rectangle is to be cropped
        x1,y1: top-left vertex
        x2,y2: bottom-right vertex
        b,h: dimensions of the cropped image
        '''
        xa=x1
        xb=x2
        xc=x1
        xd=x2
        ya=y1
        yb=y1
        yc=y2
        yd=y2

        pts1 = np.float32([[xa,ya],[xc,yc],[xb,yb],[xd,yd]]) 
        pts2 = np.float32([[0,0],[0,h],[b,0],[b,h]])
        persM = cv2.getPerspectiveTransform(pts1,pts2)
        dst = cv2.warpPerspective(img,persM,(b,h))

        return dst




#detection begins here
Project: Stronghold-2016-Vision | Author: team4099
def get_warped_image_from_corners(image, corners):
    """
    Returns unwarped image of goal, using corners of goal and the original
    source image.
    Parameters:
        :param: `image` - the original source image with the goal in it
        :param: `corners` - a numpy array of the corner pixels of the goal

    """
    orig_image = numpy.copy(image)
    center = get_center(corners)
    corners = sort_corners(corners, center)

    height_right = int(math.sqrt((corners[1][0][0] - corners[2][0][0]) ** 2 +
                                 (corners[1][0][1] - corners[2][0][1]) ** 2))
    height_left = int(math.sqrt((corners[0][0][0] - corners[3][0][0]) ** 2 +
                                (corners[0][0][1] - corners[3][0][1]) ** 2))
    height = int((height_left + height_right) / 2)
    width = int(height * (300 / 210))

    quad = numpy.zeros((width, height))
    quad_pts = numpy.array([[[0, 0]], [[width, 0]],
                            [[width, height]], [[0, height]]], numpy.float32)

    new_image_to_process = numpy.array(image, numpy.float32)
    quad_pts = cv2.getPerspectiveTransform(corners, quad_pts)
    warped_image = cv2.warpPerspective(new_image_to_process, quad_pts,
                                      (width, height))
    return warped_image

# def get_distance_to_goal(orig_image, warped_image):
#     angle_between_sides = (len(warped_image[0]) / len(orig_image[0])) * FOV_OF_CAMERA
#     print(math.degrees(angle_between_sides))
#     return ((WIDTH_OF_GOAL_IN_METERS / 2) / math.sin(angle_between_sides / 2)) * math.sin((math.pi + angle_between_sides) / 2)
Project: head-segmentation | Author: szywind
def randomShiftScaleRotate(image, mask,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5, factor=1):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))

    return image, mask
Project: WeiQiRecognition | Author: JDython
def save_chessboard_img(resize_pic,vertical_position,parallel_position):
    pts3 = np.float32([vertical_position[0],vertical_position[1],parallel_position[0],parallel_position[1]])
    pts4 = np.float32([[0,0],[640,0],[0,480],[640,480]])
    M_perspective = cv2.getPerspectiveTransform(pts3,pts4)
    img_perspective = cv2.warpPerspective(resize_pic, M_perspective, (0, 0))
    cv2.imwrite('static/InterceptedIMG/clip.jpg',img_perspective)
    return img_perspective
Project: ArkwoodAR | Author: rdmilligan
def get_topdown_quad(image, src):

    # src and dst points
    src = _order_points(src)

    (max_width,max_height) = _max_width_height(src)
    dst = _topdown_points(max_width, max_height)

    # warp perspective
    matrix = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, matrix, _max_width_height(src))

    return warped
Project: card-scanner | Author: RFVenter
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "bird's eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
Project: Kaggle-Carvana-Image-Masking-Challenge | Author: petrosgk
def randomShiftScaleRotate(image, mask,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))

    return image, mask
Project: digital-display-character-rec | Author: upupnaway
def normalize_contrs(img,cntr_pts):
  ratio = img.shape[0] / 300.0
  norm_pts = np.zeros((4,2), dtype="float32")

  s = cntr_pts.sum(axis=1)
  norm_pts[0] = cntr_pts[np.argmin(s)]
  norm_pts[2] = cntr_pts[np.argmax(s)]

  d = np.diff(cntr_pts,axis=1)
  norm_pts[1] = cntr_pts[np.argmin(d)]
  norm_pts[3] = cntr_pts[np.argmax(d)]

  norm_pts *= ratio

  (top_left, top_right, bottom_right, bottom_left) = norm_pts

  width1 = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) + ((bottom_right[1] - bottom_left[1]) ** 2))
  width2 = np.sqrt(((top_right[0] - top_left[0]) ** 2) + ((top_right[1] - top_left[1]) ** 2))
  height1 = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) + ((top_right[1] - bottom_right[1]) ** 2))
  height2 = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) + ((top_left[1] - bottom_left[1]) ** 2))

  max_width = max(int(width1), int(width2))
  max_height = max(int(height1), int(height2))

  dst = np.array([[0,0], [max_width -1, 0],[max_width -1, max_height -1],[0, max_height-1]], dtype="float32")
  persp_matrix = cv2.getPerspectiveTransform(norm_pts,dst)
  return cv2.warpPerspective(img,persp_matrix,(max_width,max_height))
Project: Notes2ppt | Author: gsengupta2810
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "bird's eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
Project: pcbre | Author: pcbre
def update_matricies(self):
        # Build compatible arrays for cv2.getPerspectiveTransform
        src = numpy.ones((4,2), dtype=numpy.float32)
        dst = numpy.ones((4,2), dtype=numpy.float32)
        src[:, :2] = self.align_handles[:4]
        dst[:, :2] = corners  # 'corners' is a module-level constant in the original file (not shown here)

        # And update the perspective transform
        self.persp_matrix = cv2.getPerspectiveTransform(src, dst)

        # Now, calculate the scale factor
        da = self.dim_handles[1] - self.dim_handles[0]
        db = self.dim_handles[3] - self.dim_handles[2]

        ma = da.mag()
        mb = db.mag()

        sf = 100.0/max(ma, mb)

        self.placeholder_dim_values[0] = sf * ma * MM
        self.placeholder_dim_values[1] = sf * mb * MM

        dims = self.__active_dims()

        # Perspective transform handles - convert to perspective-corrected space
        handles_pp = []
        for handle in self.dim_handles:
            p1 = self.persp_matrix.dot(handle.homol())
            p1 /= p1[2]
            handles_pp.append(p1[:2])

        da = handles_pp[1] - handles_pp[0]
        db = handles_pp[3] - handles_pp[2]
        A = numpy.vstack([da**2, db**2])
        B = numpy.array(dims) ** 2
        res = numpy.abs(numpy.linalg.solve(A, B)) ** .5

        self.scale_matrix = scale(res[0], res[1])
Project: KAGGLE_CERVICAL_CANCER_2017 | Author: ZFTurbo
def return_random_perspective(img, row):
    perc = 0.1
    cols = img.shape[1]
    rows = img.shape[0]
    start0_max, end0_max, start1_max, end1_max = get_bounding_boxes_positions(img, row)

    if start1_max <= 0:
        p1 = random.randint(0, int(img.shape[1] * perc))
    else:
        p1 = random.randint(0, start1_max)

    if start0_max <= 0:
        p2 = random.randint(0, int(img.shape[0] * perc))
    else:
        p2 = random.randint(0, start0_max)

    if end1_max >= img.shape[1]:
        p3 = img.shape[1] - random.randint(0, int(img.shape[1] * perc))
    else:
        p3 = random.randint(end1_max, img.shape[1])

    if start0_max <= 0:
        p4 = random.randint(0, int(img.shape[0] * perc))
    else:
        p4 = random.randint(0, start0_max)

    if start1_max <= 0:
        p5 = random.randint(0, int(img.shape[1] * perc))
    else:
        p5 = random.randint(0, start1_max)

    if end0_max >= img.shape[0]:
        p6 = img.shape[0] - random.randint(0, int(img.shape[0] * perc))
    else:
        p6 = random.randint(end0_max, img.shape[0])

    if end1_max >= img.shape[1]:
        p7 = img.shape[1] - random.randint(0, int(img.shape[1] * perc))
    else:
        p7 = random.randint(end1_max, img.shape[1])

    if end0_max >= img.shape[0]:
        p8 = img.shape[0] - random.randint(0, int(img.shape[0] * perc))
    else:
        p8 = random.randint(end0_max, img.shape[0])

    pts1 = np.float32([[p1, p2], [p3, p4], [p5, p6], [p7, p8]])
    pts2 = np.float32([[0, 0], [cols, 0], [0, rows], [cols, rows]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    # img = cv2.rectangle(img, (int(start1_max), int(start0_max)), (int(end1_max), int(end0_max)), (0, 0, 255), thickness=5)
    dst = cv2.warpPerspective(img, M, (cols, rows))
    # show_resized_image(dst)
    return dst