Python cv2 module: warpPerspective() source code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.warpPerspective().

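Before the project examples, here is a minimal self-contained sketch of the basic call pattern (the file name and corner coordinates are illustrative placeholders, not taken from any project below): build a 3x3 homography from four point pairs with cv2.getPerspectiveTransform(), then apply it with cv2.warpPerspective(), whose dsize argument is (width, height).

import cv2
import numpy as np

img = cv2.imread('input.jpg')  # placeholder path

# four corners of the region to rectify, as (x, y):
# top-left, top-right, bottom-right, bottom-left
src = np.float32([[100, 80], [520, 60], [540, 400], [90, 420]])

# where those corners should land in the output image
w, h = 450, 350
dst = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])

M = cv2.getPerspectiveTransform(src, dst)     # 3x3 perspective matrix
warped = cv2.warpPerspective(img, M, (w, h))  # note: dsize is (width, height)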
Project: diy_driverless_car_ROS    Author: wilselby    | Project source | File source
def render_lane(image, corners, ploty, fitx):

    _, src, dst = perspective_transform(image, corners)
    Minv = cv2.getPerspectiveTransform(dst, src)

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(image[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts = np.vstack((fitx,ploty)).astype(np.int32).T

    # Draw the lane onto the warped blank image
    #plt.plot(left_fitx, ploty, color='yellow')
    cv2.polylines(color_warp, [pts], False, (0, 255, 0), 10)
    #cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 

    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)

    return result
Project: SDcarsLaneDetection    Author: Nazanin1369    | Project source | File source
def corners_unwarp(img, nx, ny, mtx, dist):
    # Initialize defaults so there is always something to return
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: SelfDrivingCar    Author: aguijarro    | Project source | File source
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: Roomba980-Python    Author: NickWaterton    | Project source | File source
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not enough matches are found - %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image
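A side note on the np.linalg.inv(M) call above: cv2.warpPerspective can apply the inverse mapping itself through the WARP_INVERSE_MAP flag (the SynthText example further down uses the same flag), so an equivalent sketch of the deskew step would be:

# WARP_INVERSE_MAP treats M as the output-to-input (orig -> skewed) mapping,
# which avoids the explicit matrix inversion
im_out = cv2.warpPerspective(skewed_image, M,
                             (orig_image.shape[1], orig_image.shape[0]),
                             flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)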
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project source | File source
def warpImage(src, theta, phi, gamma, scale, fovy):
    halfFovy = fovy * 0.5
    d = math.hypot(src.shape[1], src.shape[0])
    sideLength = scale * d / math.cos(deg2Rad(halfFovy))
    sideLength = np.int32(sideLength)

    M = warpMatrix(src.shape[1], src.shape[0], theta, phi, gamma, scale, fovy)
    dst = cv2.warpPerspective(src, M, (sideLength, sideLength))
    # centre-crop the warped output back to the source size (assumes a square source)
    mid_x = mid_y = dst.shape[0] // 2
    target_x = target_y = src.shape[0] // 2
    offset = (target_x % 2)

    if len(dst.shape) == 3:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset,
              :]
    else:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset]

    return dst
Project: RacingRobot    Author: sergionr2    | Project source | File source
def rotateImage(image, phi, theta, psi):
    """
    Rotate an image
    :param image: (cv2 image object)
    :param phi: (float)
    :param theta: (float)
    :param psi: (float)
    :return: (cv2 image object)
    """
    # Height, Width, Channels
    h, w, c = image.shape
    F = np.float32([[300, 0, w / 2.], [0, 300, h / 2.], [0, 0, 1]])
    R = rotMatrix([phi, theta, psi])
    T = [[0], [0], [1]]
    T = np.dot(R, T)
    R[0][2] = T[0][0]
    R[1][2] = T[1][0]
    R[2][2] = T[2][0]
    M = np.dot(F, np.linalg.inv(np.dot(F, R)))
    out = cv2.warpPerspective(image, M, (w, h))
    return out
Project: specularity-removal    Author: gmichaeljaison    | Project source | File source
def _solve(img1, img2):
    h, w, d = img1.shape

    # step 1: Find homography of 2 images
    homo = homography(img2, img1)

    # step 2: warp image2 to image1 frame
    img2_w = cv.warpPerspective(img2, homo, (w, h))

    # step 3: resolve highlights by picking the best pixels out of two images
    im1 = _resolve_spec(img1, img2_w)

    # step 4: repeat the same process for Image2 using warped Image1
    im_w = cv.warpPerspective(im1, np.linalg.inv(homo), (w, h))
    im2 = _resolve_spec(img2, im_w)

    return im1, im2
Project: piwall-cvtools    Author: infinnovation    | Project source | File source
def do_warp(M, warp):
    # note: relies on module-level globals (orig, maxWidth, maxHeight, image, edged)
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0)
Project: inyourface    Author: yacomink    | Project source | File source
def maskFace(self, frame_image, face):

        img1 = cv2.imread(self.__class__.mask_path, cv2.IMREAD_UNCHANGED)
        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask = self.getTransPIL(cv2.warpPerspective(img1, h, (frame_image.width,frame_image.height)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        enhancer = ImageEnhance.Color(frame_image)
        enhanced = enhancer.enhance(0.1)
        enhancer = ImageEnhance.Brightness(enhanced)
        enhanced = enhancer.enhance(1.2)
        enhancer = ImageEnhance.Contrast(enhanced)
        enhanced = enhancer.enhance(1.2)

        frame_image.paste(enhanced, (0,0), mask)
        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: quadrilaterals-rectifier    Author: michal2229    | Project source | File source
def extract_rect(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

    ret,thresh = cv2.threshold(imgray, 127, 255, 0)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)

    #cv2.drawContours(im, appr, -1, (0,255,0), 3)
    points_list = [[i[0][0], i[0][1]] for i in appr] 

    left  = sorted(points_list, key = lambda p: p[0])[0:2]
    right = sorted(points_list, key = lambda p: p[0])[2:4]

    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key = lambda p: p[1])[0]
    ld = sorted(left, key = lambda p: p[1])[1]

    ru = sorted(right, key = lambda p: p[1])[0]
    rd = sorted(right, key = lambda p: p[1])[1]

    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    lu_ = [ (lu[0] + ld[0])/2, (lu[1] + ru[1])/2 ]
    ld_ = [ (lu[0] + ld[0])/2, (ld[1] + rd[1])/2 ]
    ru_ = [ (ru[0] + rd[0])/2, (lu[1] + ru[1])/2 ]
    rd_ = [ (ru[0] + rd[0])/2, (ld[1] + rd[1])/2 ]

    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h,w,b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    print("H" + str(H))

    imw =  cv2.warpPerspective(im, H, (w, h))

    return imw[int(lu_[1]):int(rd_[1]), int(lu_[0]):int(rd_[0])] # cropping image (indices cast to int for slicing)
Project: idmatch    Author: maddevsio    | Project source | File source
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32"
    )
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
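The various four_point_transform() examples in this list all call an order_points() helper that the extracts do not include. A common implementation (a sketch of the usual convention, not necessarily the exact helper each project ships) sorts the corners into top-left, top-right, bottom-right, bottom-left order:

def order_points(pts):
    # pts: (4, 2) array of corner points in arbitrary order
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]  # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]  # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)     # y - x for each point
    rect[1] = pts[np.argmin(d)]  # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]  # bottom-left has the largest y - x
    return rect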
Project: idmatch    Author: maddevsio    | Project source | File source
def four_point_transform(image, pts):
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
Project: imgProcessor    Author: radjkarl    | Project source | File source
def _fitImg(self, img):
        '''
        fit perspective and size of the input image to the reference image
        '''
        img = imread(img, 'gray')
        if self.bg is not None:
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)

        (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
        H_inv = self.invertHomography(H)

        s = self.obj_shape
        fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
        return fit, img, H, H_inv, n_matches
Project: omr    Author: rbaron    | Project source | File source
def perspective_transform(img, points):
    """Transform img so that points are the new corners"""

    source = np.array(
        points,
        dtype="float32")

    dest = np.array([
        [TRANSF_SIZE, TRANSF_SIZE],
        [0, TRANSF_SIZE],
        [0, 0],
        [TRANSF_SIZE, 0]],
        dtype="float32")

    img_dest = img.copy()
    transf = cv2.getPerspectiveTransform(source, dest)
    warped = cv2.warpPerspective(img, transf, (TRANSF_SIZE, TRANSF_SIZE))
    return warped
Project: diy_driverless_car_ROS    Author: wilselby    | Project source | File source
def perspective_transform(image, corners, debug=False, xoffset=0):

    height, width = image.shape[0:2]
    output_size = height/2

    new_top_left = np.array([corners[0, 0], 0])
    new_top_right = np.array([corners[3, 0], 0])
    offset = [xoffset, 0]
    img_size = (image.shape[1], image.shape[0])
    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([corners[0] + offset, new_top_left + offset, new_top_right - offset, corners[3] - offset])

    M = cv2.getPerspectiveTransform(src, dst)

    warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

    if debug:
        drawQuad(image, src, [255, 0, 0])
        drawQuad(warped, dst, [255, 255, 0])
        plt.imshow(image)
        plt.show()
        plt.imshow(warped)
        plt.show()

    return warped, src, dst
Project: diy_driverless_car_ROS    Author: wilselby    | Project source | File source
def perspective_transform(self,  image, debug=True, size_top=70, size_bottom=370):
        height, width = image.shape[0:2]
        output_size = height/2

        #src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
        src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
        dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
        #dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])

        M = cv2.getPerspectiveTransform(src, dst)
        print(M)
        warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)

        if debug:
            self.drawQuad(image, src, [255, 0, 0])
            self.drawQuad(image, dst, [255, 255, 0])
            plt.imshow(image)
            plt.show()

        return warped
Project: crnn_tf    Author: liuhu-bigeye    | Project source | File source
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)

    # perspective translation
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)

    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    # color deviation
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]

    return ims_src, mat
Project: crnn_tf    Author: liuhu-bigeye    | Project source | File source
def im_augmentation(ims_src, weight, vec, trans=0.1, color_dev=0.1, distortion=True):
    num, W, H, _ = ims_src.shape
    if distortion:
        ran_noise = np.random.random((4, 2))
        ran_color = np.random.randn(3,)
    else:
        ran_noise = np.ones((4, 2)) * 0.5
        ran_color = np.zeros(3,)

    # perspective translation
    dst = np.float32([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]) * np.float32([W, H])
    noise = trans * ran_noise * np.float32([[1., 1.], [-1., 1.], [1., -1.], [-1., -1.]]) * [W, H]
    src = np.float32(dst + noise)

    mat = cv2.getPerspectiveTransform(src, dst)
    for i in range(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    # color deviation
    deviation = np.dot(vec, (color_dev * ran_color * weight)) * 255.
    ims_src += deviation[None, None, None, :]

    return ims_src, mat
Project: ego-lane-analysis-system    Author: rodrigoberriel    | Project source | File source
def apply_ipm(img, config, ys):
    # IPM
    y_top, y_bottom = min(ys), max(ys)
    ipm_pts = config['dataset']['ipm_points']
    roi = config['dataset']['region_of_interest']

    src = np.array([
        [ipm_pts['@top_left'], y_top],
        [ipm_pts['@top_right'], y_top],
        [ipm_pts['@bottom_right'], y_bottom],
        [ipm_pts['@bottom_left'], y_bottom],
    ], dtype="float32")

    dst = np.array([
        [ipm_pts['@top_left'], 0],
        [ipm_pts['@top_right'], 0],
        [ipm_pts['@top_right'], roi['@height']],
        [ipm_pts['@top_left'], roi['@height']],
    ], dtype="float32")

    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (roi['@width'], roi['@height']))
Project: Notes2ppt    Author: gsengupta2810    | Project source | File source
def four_point_transform(image, pts):

    rect = order_points(pts)
    (tl, tr, br, bl) = rect


    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    return warped
Project: kaggle_amazon_from_space    Author: N01Z3    | Project source | File source
def randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape

        angle = random.uniform(-rotate_limit, rotate_limit)  # degree
        scale = random.uniform(1 - scale_limit, 1 + scale_limit)
        dx = round(random.uniform(-shift_limit, shift_limit) * width)
        dy = round(random.uniform(-shift_limit, shift_limit) * height)

        cc = math.cos(angle / 180 * math.pi) * (scale)
        ss = math.sin(angle / 180 * math.pi) * (scale)
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        img = cv2.warpPerspective(img, mat, (width, height), flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REFLECT_101)  # cv2.BORDER_CONSTANT, borderValue = (0, 0, 0))  #cv2.BORDER_REFLECT_101

    return img
Project: answer-sheet-scan    Author: inuyasha2012    | Project source | File source
def detect_cnt_again(poly, base_img):
    """
    Detect the sheet contour again inside the region of interest and perspective-correct it.
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # flag: whether a usable sheet contour was found inside the ROI
    flag = False

    # locate the four corner points of the polygon, crop the ROI and preprocess it
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)

    # find the contour with the largest area
    cnt = get_max_area_cnt(img)

    # if the contour is large enough relative to the ROI, approximate it again
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if not poly.shape[0] == 4:
            raise PolyNodeCountError

    # map the four corner points onto the full base-image canvas
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                            [base_img.shape[1], 0],
                            [0, base_img.shape[0]],
                            [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)

    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp
Project: FaceSwap    Author: Aravind-Suresh    | Project source | File source
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
Project: LensCalibrator    Author: 1024jp    | Project source | File source
def project_image(self, image, size, offset=(0, 0)):
        """Remove parspective from given image.

        Arguments:
        image numpy.array -- Image source in numpy image form.
        size ([int]) -- Size of the output image.
        """
        translation = np.matrix([
            [1.0, 0.0, -offset[0]],
            [0.0, 1.0, -offset[1]],
            [0.0, 0.0, 1.0]
        ])
        matrix = translation * self.homography

        return cv2.warpPerspective(image, matrix, tuple(size))
Project: Vision2016    Author: Team3309    | Project source | File source
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, and a new binary image to test against
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
Project: yonkoma2data    Author: esuji5    | Project source | File source
def transform_by4(self, img, points):
        points = sorted(points, key=lambda x: x[1])
        if len(points) == 4:
            top = sorted(points[:2], key=lambda x: x[0])
            bottom = sorted(points[2:], key=lambda x: x[0], reverse=True)
            points = np.array(top + bottom, dtype='float32')
        else:
            y_min, y_max = points[0][1], points[-1][1]
            points = sorted(points, key=lambda x: x[0])
            x_min, x_max = points[0][0], points[-1][0]
            points = np.array([np.array([x_min, y_min]),
                               np.array([x_max, y_min]),
                               np.array([x_max, y_max]),
                               np.array([x_min, y_max])],
                              np.float32)

        width = max(np.sqrt(((points[0][0] - points[2][0]) ** 2) * 2),
                    np.sqrt(((points[1][0] - points[3][0]) ** 2) * 2))
        height = max(np.sqrt(((points[0][1] - points[2][1]) ** 2) * 2),
                     np.sqrt(((points[1][1] - points[3][1]) ** 2) * 2))

        dst = np.array([np.array([0, 0]),
                        np.array([width - 1, 0]),
                        np.array([width - 1, height - 1]),
                        np.array([0, height - 1]),
                        ], np.float32)

        # compute the perspective transform and warp the quad to an upright rectangle
        trans = cv2.getPerspectiveTransform(points, dst)
        return cv2.warpPerspective(img, trans, (int(width), int(height)))
Project: handfontgen    Author: nixeneko    | Project source | File source
def transform(image, rectpoints, dpmm):
    docpxls = (int(DOCSIZE[0]*dpmm),int(DOCSIZE[1]*dpmm))
    docrect = np.array(
                [(0,0), (docpxls[0], 0), (docpxls[0], docpxls[1]), (0, docpxls[1])],
                'float32')
    transmat = cv2.getPerspectiveTransform(np.array(rectpoints, 'float32'), docrect)
    return cv2.warpPerspective(image, transmat, docpxls)
Project: inyourface    Author: yacomink    | Project source | File source
def maskMouth(self, frame_image, face):
        elements = cv2.imread(self.__class__.mask_mouth_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_mouth_points, np.array(self.getMouthPoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))

        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: inyourface    Author: yacomink    | Project source | File source
def maskFace(self, frame_image, face):

        elements = cv2.imread(self.__class__.mask_elements_path, cv2.IMREAD_UNCHANGED)

        h, status = cv2.findHomography(self.average_points, np.array(self.getFacePoints(face)))
        mask_elements = self.getTransPIL(cv2.warpPerspective(elements, h, (frame_image.width,frame_image.height)))
        frame_image.paste(mask_elements, (0,0), mask_elements)
Project: SynthText    Author: ankush-me    | Project source | File source
def warpHomography(self,src_mat,H,dst_size):
        dst_mat = cv2.warpPerspective(src_mat, H, dst_size,
                                      flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
        return dst_mat
Project: AutonomousParking    Author: jovanduy    | Project source | File source
def transform_img(self):
        """ Transform the top-down image of the arc so that it lays flat in a plane on our cv_image """
        if self.vel is not None and self.omega is not None:
            pts1 = np.float32([[0,0], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [IMG_WIDTH, 0]])
            pts2 = np.float32([[200,240], [0, IMG_HEIGHT], [IMG_WIDTH, IMG_HEIGHT], [400, 240]])
            M = cv2.getPerspectiveTransform(pts1, pts2)
            self.transformed = cv2.warpPerspective(self.arc_image, M, (self.cv_image.shape[0], self.cv_image.shape[1]))
            rows, cols, channels = self.transformed.shape
            self.transformed = self.transformed[0:IMG_HEIGHT, 0: cols]
Project: guard-breaker    Author: JRJurman    | Project source | File source
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
Project: imgProcessor    Author: radjkarl    | Project source | File source
def fitImg(self, img_rgb):
        '''
        fit perspective and size of the input image to the base image
        '''
        H = self.pattern.findHomography(img_rgb)[0]
        H_inv = self.pattern.invertHomography(H)
        s = self.img_orig.shape
        warped = cv2.warpPerspective(img_rgb, H_inv, (s[1], s[0]))
        return warped
Project: imgProcessor    Author: radjkarl    | Project source | File source
def simplePerspectiveTransform(img, quad, shape=None,
                               interpolation=cv2.INTER_LINEAR,
                               inverse=False):
    p = sortCorners(quad).astype(np.float32)
    if shape is not None:
        height, width = shape
    else:
        # get output image size from avg. quad edge length
        width = int(round(0.5 * (np.linalg.norm(p[0] - p[1]) +
                                 np.linalg.norm(p[3] - p[2]))))
        height = int(round(0.5 * (np.linalg.norm(p[1] - p[2]) +
                                  np.linalg.norm(p[0] - p[3]))))

    dst = np.float32([[0,     0],
                      [width, 0],
                      [width, height],
                      [0,     height]])

    if inverse:
        s0, s1 = img.shape[:2]
        dst /= ((width / s1), (height / s0))
        H = cv2.getPerspectiveTransform(dst, p)
    else:
        H = cv2.getPerspectiveTransform(p, dst)

    return cv2.warpPerspective(img, H, (width, height), flags=interpolation)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def uncorrect(self, img):
        img = imread(img)
        s = img.shape[:2]
        return cv2.warpPerspective(img, self.homography, s[::-1],
                                   flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)
Project: tefla    Author: openAGI    | Project source | File source
def warp(self, img, matrix):
        return cv2.warpPerspective(img, matrix, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
Project: FindYourCandy    Author: BrainPad    | Project source | File source
def calibrate(self, img):
        corners = self.detect_corners(img)
        transform_matrix = cv2.getPerspectiveTransform(corners, self.corner_coords)
        return cv2.warpPerspective(img, transform_matrix, self.area)
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                     borderValue=(
                                         0, 0,
                                         0,))

    return image, mask, mask_w
Project: unet-tensorflow    Author: timctho    | Project source | File source
def randomShiftScaleRotate(image, mask, mask_w,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))
        mask_w = cv2.warpPerspective(mask_w, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                     borderValue=(
                                         0, 0,
                                         0,))

    return image, mask, mask_w
Project: image_stacking    Author: maitek    | Project source | File source
def stackImagesECC(file_list):
    M = np.eye(3, 3, dtype=np.float32)

    first_image = None
    stacked_image = None

    for file in file_list:
        image = cv2.imread(file,1).astype(np.float32) / 255
        print(file)
        if first_image is None:
            # convert to gray scale floating point image
            first_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            stacked_image = image
        else:
            # Estimate perspective transform
            s, M = cv2.findTransformECC(cv2.cvtColor(image,cv2.COLOR_BGR2GRAY), first_image, M, cv2.MOTION_HOMOGRAPHY)
            h, w, _ = image.shape
            # Align image to first image
            image = cv2.warpPerspective(image, M, (w, h))
            stacked_image += image

    stacked_image /= len(file_list)
    stacked_image = (stacked_image*255).astype(np.uint8)
    return stacked_image


# Align and stack images by matching ORB keypoints
# Faster but less accurate
Project: lane-detection-raspberry-pi    Author: uvbakutan    | Project source | File source
def trans_per(self, image):

        image = self.binary_extraction(image)

        self.binary_image = image

        ysize = image.shape[0]
        xsize = image.shape[1]

        # define region of interest
        left_bottom = (xsize/10, ysize)
        apex_l = (xsize/2 - 2600/(self.look_ahead**2),  ysize - self.look_ahead*275/30)
        apex_r = (xsize/2 + 2600/(self.look_ahead**2),  ysize - self.look_ahead*275/30)
        right_bottom = (xsize - xsize/10, ysize)

        # define vertices for perspective transformation
        src = np.array([[left_bottom], [apex_l], [apex_r], [right_bottom]], dtype=np.float32)
        dst = np.float32([[xsize/3,ysize],[xsize/4.5,0],[xsize-xsize/4.5,0],[xsize-xsize/3, ysize]])

        self.M = cv2.getPerspectiveTransform(src, dst)
        self.Minv = cv2.getPerspectiveTransform(dst, src)

        if len(image.shape) > 2:
            warped = cv2.warpPerspective(image, self.M, image.shape[-2:None:-1], flags=cv2.INTER_LINEAR)
        else:
            warped = cv2.warpPerspective(image, self.M, image.shape[-1:None:-1], flags=cv2.INTER_LINEAR)
        return warped

    # create window mask for lane detection
Project: Computer-Vision    Author: PratikRamdasi    | Project source | File source
def perspectiveTransform(self):
        folder=self.sort_files()
        P=self.get_points()
        self.height,self.width=cv2.imread("Frames/1.jpg").shape[:2]
        # Process frames  
        for i in folder:
            pic="Frames/"+str(i)+".jpg"
            img = cv2.imread(pic)
            pts1 = np.float32([[P[0][0],P[0][1]],[P[1][0],P[1][1]],[P[2][0],P[2][1]],[P[3][0],P[3][1]]])
            pts2 = np.float32([[P[0][2],P[0][3]],[P[1][2],P[1][3]],[P[2][2],P[2][3]],[P[3][2],P[3][3]]])
            M = cv2.getPerspectiveTransform(pts1,pts2)
            dst = cv2.warpPerspective(img,M,(self.width,self.height))
            cv2.imwrite("Frames/%d.jpg" % i, dst)

    # Get x,y co-ordinates
Project: Face_recog_LBPH    Author: vedvasu    | Project source | File source
def crop_out(img,x1,y1,x2,y2,b,h):
        '''
        This function is used to crop the image to the desired dimensions.
        img: image from which the rectangle is to be cropped
        x1,y1: top left vertex parameter
        x2,y2: bottom right parameter
        b,h: dimensions of the cropped image
        '''
        xa=x1
        xb=x2
        xc=x1
        xd=x2
        ya=y1
        yb=y1
        yc=y2
        yd=y2

        pts1 = np.float32([[xa,ya],[xc,yc],[xb,yb],[xd,yd]]) 
        pts2 = np.float32([[0,0],[0,h],[b,0],[b,h]])
        persM = cv2.getPerspectiveTransform(pts1,pts2)
        dst = cv2.warpPerspective(img,persM,(b,h))

        return dst




#detection begins here
Project: Stronghold-2016-Vision    Author: team4099    | Project source | File source
def get_warped_image_from_corners(image, corners):
    """
    Returns unwarped image of goal, using corners of goal and the original
    source image.
    Parameters:
        :param: `image` - the original source image with the goal in it
        :param: `corners` - a numpy array of the corner pixels of the goal

    """
    orig_image = numpy.copy(image)
    center = get_center(corners)
    corners = sort_corners(corners, center)

    height_right = int(math.sqrt((corners[1][0][0] - corners[2][0][0]) ** 2 +
                                 (corners[1][0][1] - corners[2][0][1]) ** 2))
    height_left = int(math.sqrt((corners[0][0][0] - corners[3][0][0]) ** 2 +
                                (corners[0][0][1] - corners[3][0][1]) ** 2))
    height = int((height_left + height_right) / 2)
    width = int(height * (300 / 210))

    quad = numpy.zeros((width, height))
    quad_pts = numpy.array([[[0, 0]],      [[width, 0]],
                            [[width, height]], [[0, height]]], numpy.float32)

    new_image_to_process = numpy.array(image, numpy.float32)
    quad_pts = cv2.getPerspectiveTransform(corners, quad_pts)
    warped_image = cv2.warpPerspective(new_image_to_process, quad_pts,
                                      (width, height))
    return warped_image

# def get_distance_to_goal(orig_image, warped_image):
#     angle_between_sides = (len(warped_image[0]) / len(orig_image[0])) * FOV_OF_CAMERA
#     print(math.degrees(angle_between_sides))
#     return ((WIDTH_OF_GOAL_IN_METERS / 2) / math.sin(angle_between_sides / 2)) * math.sin((math.pi + angle_between_sides) / 2)
Project: head-segmentation    Author: szywind    | Project source | File source
def randomShiftScaleRotate(image, mask,
                           shift_limit=(-0.0625, 0.0625),
                           scale_limit=(-0.1, 0.1),
                           rotate_limit=(-45, 45), aspect_limit=(0, 0),
                           borderMode=cv2.BORDER_CONSTANT, u=0.5, factor=1):
    if np.random.random() < u:
        height, width, channel = image.shape

        angle = np.random.uniform(rotate_limit[0], rotate_limit[1])  # degree
        scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
        aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
        sx = scale * aspect / (aspect ** 0.5)
        sy = scale / (aspect ** 0.5)
        dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
        dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)

        cc = np.math.cos(angle / 180 * np.math.pi) * sx
        ss = np.math.sin(angle / 180 * np.math.pi) * sy
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                    borderValue=(
                                        0, 0,
                                        0,))
        mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
                                   borderValue=(
                                       0, 0,
                                       0,))

    return image, mask
Project: WeiQiRecognition    Author: JDython    | Project source | File source
def save_chessboard_img(resize_pic,vertical_position,parallel_position):
    pts3 = np.float32([vertical_position[0],vertical_position[1],parallel_position[0],parallel_position[1]])
    pts4 = np.float32([[0,0],[640,0],[0,480],[640,480]])
    M_perspective = cv2.getPerspectiveTransform(pts3,pts4)
    img_perspective = cv2.warpPerspective(resize_pic, M_perspective, (640, 480))  # output size matches the 640x480 destination points
    cv2.imwrite('static/InterceptedIMG/clip.jpg',img_perspective)
    return img_perspective
Project: image_rectification    Author: evanlev    | Project source | File source
def myApplyH(im, H):
    return cv2.warpPerspective(im, H, (im.shape[1], im.shape[0]))

# INPUTS: 
#   im = image
# OUTPUTS:
#   image with outer regions cropped
Project: crnn_tf    Author: liuhu-bigeye    | Project source | File source
def of_augmentation(ims_src, mat):
    num, W, H, _ = ims_src.shape
    for i in xrange(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    return ims_src
Project: crnn_tf    Author: liuhu-bigeye    | Project source | File source
def of_augmentation(ims_src, mat):
    num, W, H, _ = ims_src.shape
    for i in xrange(num):
        ims_src[i] = cv2.warpPerspective(ims_src[i], mat, (W, H))

    return ims_src
Project: ArkwoodAR    Author: rdmilligan    | Project source | File source
def get_topdown_quad(image, src):

    # src and dst points
    src = _order_points(src)

    (max_width,max_height) = _max_width_height(src)
    dst = _topdown_points(max_width, max_height)

    # warp perspective
    matrix = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, matrix, _max_width_height(src))

    return warped