Python cv2 module: getAffineTransform() example source code

We extracted the following 35 code examples from open-source Python projects to illustrate how to use cv2.getAffineTransform().
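
Every example below follows the same core pattern: cv2.getAffineTransform() takes exactly three source points and three destination points (both float32 arrays of shape (3, 2)) and returns the 2x3 affine matrix that cv2.warpAffine() then applies. A minimal self-contained sketch (toy image and point values invented for illustration):

import cv2
import numpy as np

# toy 200x200 image with a white square, so the sketch runs standalone
img = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.rectangle(img, (50, 50), (150, 150), (255, 255, 255), -1)

src = np.float32([[50, 50], [150, 50], [50, 150]])  # three corners of the square
dst = np.float32([[60, 70], [160, 40], [40, 160]])  # where they should land

M = cv2.getAffineTransform(src, dst)         # 2x3 matrix mapping src -> dst
warped = cv2.warpAffine(img, M, (200, 200))  # dsize is (width, height)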

Project: prepare-faces-zyf    Author: walkoncross    | Project source | File source
def transform_and_crop_face(src_img, facial_pts, normalized_pts=dft_normalized_5points, crop_size=dft_crop_size):
    pts_dst = np.float32(normalized_pts)
    if pts_dst.shape[0]==2:
        pts_dst = pts_dst.transpose()

    pts_src = np.float32(facial_pts)
    if pts_src.shape[0]==2:
        pts_src = pts_src.transpose()

#    tfm = cv2.getAffineTransform(pts_src[0:3], pts_dst[0:3])
#    print('cv2.getAffineTransform returns tfm=\n' + str(tfm))
#    print('type(tfm):' + str(type(tfm)))
#    print('tfm.dtype:' + str(tfm.dtype))

    tfm = _get_transform_matrix(pts_src, pts_dst)
#    print('_get_transform_matrix returns tfm=\n' + str(tfm))
#    print('type(tfm):' + str(type(tfm)))
#    print('tfm.dtype:' + str(tfm.dtype))

    dst_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return dst_img
Project: prepare-faces-zyf    Author: walkoncross    | Project source | File source
def transform_and_crop_face(src_img, facial_pts, normalized_pts=dft_normalized_5points, crop_size=dft_crop_size):
    pts_dst = np.float32(normalized_pts)
    if pts_dst.shape[0]==2:
        pts_dst = pts_dst.transpose()

    pts_src = np.float32(facial_pts)
    if pts_src.shape[0]==2:
        pts_src = pts_src.transpose()

#    tfm = cv2.getAffineTransform(pts_src[0:3], pts_dst[0:3])
#    print('cv2.getAffineTransform returns tfm=\n' + str(tfm))
#    print('type(tfm):' + str(type(tfm)))
#    print('tfm.dtype:' + str(tfm.dtype))

    tfm = _get_transform_matrix(pts_src, pts_dst)
    print('_get_transform_matrix returns tfm=\n' + str(tfm))
    print('type(tfm):' + str(type(tfm)))
    print('tfm.dtype:' + str(tfm.dtype))

    dst_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return dst_img
Project: prepare-faces-zyf    Author: walkoncross    | Project source | File source
def warp_and_crop_face(src_img, facial_pts, normalized_pts=dft_normalized_5points, crop_size=dft_crop_size):
    pts_dst = np.float32(normalized_pts)
    if pts_dst.shape[0]==2:
        pts_dst = pts_dst.transpose()

    pts_src = np.float32(facial_pts)
    if pts_src.shape[0]==2:
        pts_src = pts_src.transpose()

#    tfm = cv2.getAffineTransform(pts_src[0:3], pts_dst[0:3])
#    print('cv2.getAffineTransform returns tfm=\n' + str(tfm))
#    print('type(tfm):' + str(type(tfm)))
#    print('tfm.dtype:' + str(tfm.dtype))

    tfm = _get_transform_matrix(pts_src, pts_dst)
#    print('_get_transform_matrix returns tfm=\n' + str(tfm))
#    print('type(tfm):' + str(type(tfm)))
#    print('tfm.dtype:' + str(tfm.dtype))

    dst_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return dst_img
Project: Computer-Vision    Author: PratikRamdasi    | Project source | File source
def affineTransform(self):
        folder=self.sort_files()
        P=self.get_points()
        self.height,self.width=cv2.imread("Frames/1.jpg").shape[:2]
        # Process frames
        for i in folder:
            pic="Frames/"+str(i)+".jpg"
            img = cv2.imread(pic)
            pts1 = np.float32([[P[0][0],P[0][1]],[P[1][0],P[1][1]],[P[2][0],P[2][1]]])
            pts2 = np.float32([[P[0][2],P[0][3]],[P[1][2],P[1][3]],[P[2][2],P[2][3]]])
            M = cv2.getAffineTransform(pts1,pts2)
            dst = cv2.warpAffine(img,M,(self.width,self.height))
            cv2.imwrite("Frames/%d.jpg" % i, dst)


    # Method for Perspective transformation: OpenCV Module
Project: iffse    Author: kendricktan    | Project source | File source
def align_face_to_template(img, facial_landmarks, output_dim, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP):
    """
    Aligns image by warping it to fit the landmarks on
    the image (src) to the landmarks on the template (dst)

    Args:
        img: src image to be aligned
        facial_landmarks: list of 68 landmarks (obtained from dlib)
        output_dim: image output dimension
    """
    np_landmarks = np.float32(facial_landmarks)
    np_landmarks_idx = np.array(landmarkIndices)

    H = cv2.getAffineTransform(np_landmarks[np_landmarks_idx],
                               output_dim * SCALED_LANDMARKS[np_landmarks_idx])
    warped = cv2.warpAffine(img, H, (output_dim, output_dim))

    return warped
Project: tensorflow-litterbox    Author: rwightman    | Project source | File source
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)

    M = cv2.getAffineTransform(pts1, pts2)
    distorted_image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE) #cv2.BORDER_REFLECT_101)

    return distorted_image
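
A call sketch for the snippet above, assuming distort_affine_cv2 and its imports are in scope (values illustrative; alpha_affine is the maximum pixel jitter applied to the three anchor points, so larger values give stronger distortion):

import numpy as np

rng = np.random.RandomState(0)
img = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
distorted = distort_affine_cv2(img, alpha_affine=8, random_state=rng)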
Project: phocnet    Author: ssudholt    | Project source | File source
def create_affine_transform_augmentation(img, random_limits=(0.8, 1.1)):
        '''
        Creates an augmentation by computing an affine transform from three
        points in the image to three randomly generated points
        '''
        y, x = img.shape[:2]
        fx = float(x)
        fy = float(y)
        src_point = np.float32([[fx/2, fy/3,],
                                [2*fx/3, 2*fy/3],
                                [fx/3, 2*fy/3]])
        random_shift = (np.random.rand(3,2) - 0.5) * 2 * (random_limits[1]-random_limits[0])/2 + np.mean(random_limits)
        dst_point = src_point * random_shift.astype(np.float32)
        transform = cv2.getAffineTransform(src_point, dst_point)
        borderValue = 0
        if img.ndim == 3:
            borderValue = np.median(np.reshape(img, (img.shape[0]*img.shape[1],-1)), axis=0)
        else:
            borderValue=np.median(img)
        warped_img = cv2.warpAffine(img, transform, dsize=(x,y), borderValue=borderValue)
        return warped_img
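
Usage is one call per augmented copy; a hedged sketch (file name hypothetical; thanks to the borderValue branch above, grayscale and color inputs both work):

import cv2

word_img = cv2.imread('word_image.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
augmented = create_affine_transform_augmentation(word_img)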
Project: QRCodeReader    Author: Griffintaur    | Project source | File source
def Transform(self, PointTop, PointRight, PointBottom):
        Point1 = [PointTop[0], PointTop[1]]
        Point2 = [PointRight[0], PointRight[1]]
        Point3 = [PointBottom[0], PointBottom[1]]
        src = np.float32([Point1, Point2, Point3])
        dest_pointTop = [40, 40]
        dest_pointRight = [140, 40]
        dest_pointBottom = [40, 140]
        destination = np.float32(
            [dest_pointTop, dest_pointRight, dest_pointBottom])
        affineTrans = cv.getAffineTransform(src, destination)
        self.TransformImage = cv.warpAffine(
            self.OriginalImage, affineTrans,
            (self.OriginalImage.shape[1], self.OriginalImage.shape[0]))  # dsize is (width, height)
        self.TransformImage = self.TransformImage[0:200, 0:200]
        #cv.imshow("TransformImage",self.TransformImage)            #uncomment to debug
        #cv.waitKey(0)
        #cv.destroyAllWindows()
        return self.TransformImage
Project: facerecognition    Author: guoxiaolu    | Project source | File source
def align(self, imgDim, rgbImg, bb,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None
        assert bb is not None

        bb_dlib = dlib.rectangle(left=bb[0], top=bb[1], right=bb[2], bottom=bb[3])
        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb_dlib)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
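
A hedged usage sketch for this openface-style align(), assuming aligner is an instance of the surrounding class; note that this variant requires an explicit bounding box as a (left, top, right, bottom) tuple and expects an RGB (not BGR) image:

import cv2

bgr = cv2.imread('face.jpg')                # hypothetical input file
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # dlib landmarking expects RGB
chip = aligner.align(96, rgb, bb=(0, 0, rgb.shape[1], rgb.shape[0]))  # 96x96 aligned crop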
Project: facerecognition    Author: guoxiaolu    | Project source | File source
def align(self, imgDim, rgbImg, bb,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None
        assert bb is not None

        bb_dlib = dlib.rectangle(left=bb[0], top=bb[1], right=bb[2], bottom=bb[3])
        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb_dlib)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: inyourface    Author: yacomink    | Project source | File source
def applyAffineTransform(self, src, srcTri, dstTri, size) :

        # Given a pair of triangles, find the affine transform.
        warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

        # Apply the Affine Transform just found to the src image
        dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

        return dst


    # Check if a point is inside a rectangle
Project: icyface_api    Author: bupticybee    | Project source | File source
def segment(self,img,box,landmarks):
        left,top,right,bottom = box
        left,top,right,bottom = int(left),int(top),int(right),int(bottom)
        bb = dlib.rectangle(left,top,right,bottom)
        H = cv2.getAffineTransform(landmarks,
                               self.imgDim * MINMAX_TEMPLATE[self.npLandmarkIndices] * self.scale + self.imgDim * (1 - self.scale)/2)
        thumbnail = cv2.warpAffine(img, H, (self.imgDim, self.imgDim))
        return [('2d-align',thumbnail)]
Project: SemiSupervised_itterativeCNN    Author: styloInt    | Project source | File source
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
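
A call sketch, assuming elastic_transform and its scipy/cv2 imports from the snippet above are in scope (parameter values follow the illustrative alpha/sigma scaling often used with this gist; the np.arange(shape[2]) meshgrid means the input needs a channel axis):

import numpy as np

img = (np.random.rand(128, 128, 3) * 255).astype(np.float32)
warped = elastic_transform(img, alpha=128 * 2, sigma=128 * 0.08,
                           alpha_affine=128 * 0.08,
                           random_state=np.random.RandomState(42))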
Project: face-preprocess-tools    Author: joyhuang9473    | Project source | File source
def alignment(filePath, points, ref_points):
    '''
    @brief: align the face image at filePath so that its landmark points match ref_points
    '''
    assert(len(points) == len(ref_points))
    num_point = len(ref_points) // 2  # points come as a flat x,y,... list
    # build the destination (reference) point array
    dst = np.empty((num_point, 2), dtype=np.int32)
    k = 0
    for i in range(num_point):
        for j in range(2):
            dst[i][j] = ref_points[k]
            k = k+1
    # build the source point array
    src = np.empty((num_point, 2), dtype=np.int32)
    k = 0
    for i in range(num_point):
        for j in range(2):
            src[i][j] = points[k]
            k = k+1
    # estimate an affine transform (dst -> src) with skimage
    tfrom = tf.estimate_transform('affine', dst, src)
    # the OpenCV route: cv2.getAffineTransform needs exactly 3 point pairs and returns a 2x3 matrix M
#    pts1 = np.float32([[src[0][0],src[0][1]],[src[1][0],src[1][1]],[src[2][0],src[2][1]]])
#    pts2 = np.float32([[dst[0][0],dst[0][1]],[dst[1][0],dst[1][1]],[dst[2][0],dst[2][1]]])
#    M = cv2.getAffineTransform(pts2,pts1)
    # use all five point pairs for a least-squares affine estimate instead
    pts3 = np.float32([[src[0][0],src[0][1]],[src[1][0],src[1][1]],[src[2][0],src[2][1]],[src[3][0],src[3][1]],[src[4][0],src[4][1]]])
    pts4 = np.float32([[dst[0][0],dst[0][1]],[dst[1][0],dst[1][1]],[dst[2][0],dst[2][1]],[dst[3][0],dst[3][1]],[dst[4][0],dst[4][1]]])
    N = compute_affine_transform(pts4, pts3)
    im = skimage.io.imread(filePath)

    if im.ndim == 3:
        rows, cols, ch = im.shape
    else:
        rows, cols = im.shape
    warpimage_cv2 = cv2.warpAffine(im, N, (cols, rows))
    warpimage = tf.warp(im, inverse_map = tfrom)

    return warpimage, warpimage_cv2
Project: prepare-faces-zyf    Author: walkoncross    | Project source | File source
def _get_transform_matrix(src_pts, dst_pts):
    #tfm = cv2.getAffineTransform(pts_src[0:3], pts_dst[0:3])
    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
    n_pts = src_pts.shape[0]
    ones = np.ones((n_pts, 1), src_pts.dtype)
    src_pts_ = np.hstack([src_pts, ones])
    dst_pts_ = np.hstack([dst_pts, ones])

#    print('src_pts_:\n' + str(src_pts_))
#    print('dst_pts_:\n' + str(dst_pts_))

    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_)

    print('np.linalg.lstsq return A: \n' + str(A))
    print('np.linalg.lstsq return res: \n' + str(res))
    print('np.linalg.lstsq return rank: \n' + str(rank))
    print('np.linalg.lstsq return s: \n' + str(s))
#    tfm = np.float32([
#            [A[0,0], A[0,1], A[2, 0]],
#            [A[1,0], A[1,1], A[2, 1]]
#        ])

    if rank==3:
        tfm = np.float32([
                [A[0,0], A[1,0], A[2, 0]],
                [A[0,1], A[1,1], A[2, 1]]
            ])
    elif rank==2:
        tfm = np.float32([
            [A[0,0], A[1,0], 0],
            [A[0,1], A[1,1], 0]
            ])

    return tfm
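
_get_transform_matrix generalizes cv2.getAffineTransform: it solves for the same 2x3 matrix by least squares over however many point pairs are supplied (five, in the callers above). A quick sanity check with made-up points that the two agree when exactly three pairs are given (the function will also emit its lstsq debug prints):

import cv2
import numpy as np

src = np.float32([[30.3, 40.1], [70.5, 39.8], [50.2, 80.7]])
dst = np.float32([[38.0, 52.0], [74.0, 52.0], [56.0, 92.0]])

tfm_lstsq = _get_transform_matrix(src, dst)   # least-squares solution
tfm_exact = cv2.getAffineTransform(src, dst)  # exact 3-point solution
print(np.allclose(tfm_lstsq, tfm_exact, atol=1e-4))  # expected: True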
Project: FaceAnalysis    Author: ElliotSalisbury    | Project source | File source
def applyAffineTransform(src, srcTri, dstTri, dst) :
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    # Apply the Affine Transform just found to the src image

    dst = cv2.warpAffine(src, warpMat, (dst.shape[1], dst.shape[0]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)

    return dst

# Warps and alpha blends triangular regions from img1 and img2 to img
Project: catFaceSwapSendToTodd    Author: Micasou    | Project source | File source
def applyAffineTransform(src, srcTri, dstTri, size) :

    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

    return dst


# Check if a point is inside a rectangle
Project: cancer_nn    Author: tanmoyopenroot    | Project source | File source
def shearImage(image_array, shear_value):
    rows, cols, ch = image_array.shape
    pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
    pt1 = 5 + shear_value * np.random.uniform() - shear_value / 2
    pt2 = 20 + shear_value * np.random.uniform() - shear_value / 2
    pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])
    shear_matrix = cv2.getAffineTransform(pts1, pts2)
    shear_image = cv2.warpAffine(image_array, shear_matrix, (cols, rows))
    return shear_image
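
The anchor triangle (5,5)/(20,5)/(5,20) is hard-coded, so shear_value is measured in pixels of that small triangle regardless of image size; a call sketch with a synthetic input:

import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
sheared = shearImage(img, shear_value=4)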
Project: pokerface    Author: LiuRoy    | Project source | File source
def applyAffineTransform(src, srcTri, dstTri, size) :

    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

    return dst


# Warps and alpha blends triangular regions from img1 and img2 to img
Project: deepstreet    Author: alessiamarcolini    | Project source | File source
def transform_image(img,ang_range,shear_range,trans_range,brightness=0):
    '''
    This function transforms images to generate new images.
    The function takes in following arguments,
    1- Image
    2- ang_range: Range of angles for rotation
    3- shear_range: Range of values to apply affine transform to
    4- trans_range: Range of values to apply translations over.

    A Random uniform distribution is used to generate different parameters for transformation

    '''
    # Rotation
    ang_rot = np.random.uniform(0, ang_range)-ang_range/2  # angle in [-ang_range/2, ang_range/2)
    rows,cols,ch = img.shape
    Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)

    # Translation
    tr_x = trans_range*np.random.uniform()-trans_range/2
    tr_y = trans_range*np.random.uniform()-trans_range/2
    Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])

    # Shear
    pts1 = np.float32([[5,5],[20,5],[5,20]])

    pt1 = 5+shear_range*np.random.uniform()-shear_range/2
    pt2 = 20+shear_range*np.random.uniform()-shear_range/2

    # Brightness
    pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])

    shear_M = cv2.getAffineTransform(pts1,pts2)

    img = cv2.warpAffine(img,Rot_M,(cols,rows))
    img = cv2.warpAffine(img,Trans_M,(cols,rows))
    img = cv2.warpAffine(img,shear_M,(cols,rows))

    if brightness == 1:
      img = augment_brightness_camera_images(img)

    return img
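
A hedged call sketch (file name hypothetical; the rows,cols,ch unpacking above requires a 3-channel image, and brightness augmentation stays off unless brightness=1 and augment_brightness_camera_images is defined):

import cv2

sign = cv2.imread('sign.png')  # hypothetical 3-channel input
augmented = transform_image(sign, ang_range=20, shear_range=10, trans_range=5)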
Project: deepstreet    Author: alessiamarcolini    | Project source | File source
def transform_image(img,ang_range,shear_range,trans_range,brightness=0):
    '''
    This function transforms images to generate new images.
    The function takes in following arguments,
    1- Image
    2- ang_range: Range of angles for rotation
    3- shear_range: Range of values to apply affine transform to
    4- trans_range: Range of values to apply translations over.

    A Random uniform distribution is used to generate different parameters for transformation

    '''
    # Rotation
    ang_rot = np.random.uniform(0, ang_range)-ang_range/2  # angle in [-ang_range/2, ang_range/2)
    rows,cols,ch = img.shape
    Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)

    # Translation
    tr_x = trans_range*np.random.uniform()-trans_range/2
    tr_y = trans_range*np.random.uniform()-trans_range/2
    Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])

    # Shear
    pts1 = np.float32([[5,5],[20,5],[5,20]])

    pt1 = 5+shear_range*np.random.uniform()-shear_range/2
    pt2 = 20+shear_range*np.random.uniform()-shear_range/2

    # Brightness
    pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])

    shear_M = cv2.getAffineTransform(pts1,pts2)

    img = cv2.warpAffine(img,Rot_M,(cols,rows))
    img = cv2.warpAffine(img,Trans_M,(cols,rows))
    img = cv2.warpAffine(img,shear_M,(cols,rows))

    if brightness == 1:
      img = augment_brightness_camera_images(img)

    return img
Project: SkinLesionNeuralNetwork    Author: Neurality    | Project source | File source
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
Project: age    Author: ly015    | Project source | File source
def align_face_3(im, key_points):
    '''
    Align face image by affine transfromation. The transformation matrix is computed by 
    3 pairs of points

    input:
        im: input image
        key_points: [(xi, yi)], list of 21-key-point or 106-key-point

    output:
        im_out
    '''

    key_points = np.array(key_points, dtype = np.float32)
    dst_points = np.array([[70.745, 112.0], [108.237, 112.0], [89.4324, 153.514]], dtype = np.float32)
    dst_sz = (178, 218)

    src_points = np.zeros((3, 2), dtype = np.float32)

    if key_points.shape[0] == 21:
        src_points[0] = key_points[16]
        src_points[1] = key_points[17]
        src_points[2] = (key_points[19] + key_points[20]) / 2.0
    elif key_points.shape[0] == 106:
        src_points[0] = key_points[104]
        src_points[1] = key_points[105]
        src_points[2] = (key_points[84] + key_points[90]) / 2.0
    else:
        raise Exception('invalid number of face keypoints')

    trans_mat = cv2.getAffineTransform(src_points, dst_points)

    im_out = cv2.warpAffine(im, trans_mat, dsize = dst_sz, borderMode = cv2.BORDER_REPLICATE)

    return im_out
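
A call sketch with placeholder landmarks (real key points would come from a 21-point face landmark detector; per the indexing above, points 16/17 are presumably the eye centers and 19/20 the mouth corners):

import numpy as np

im = np.zeros((300, 300, 3), dtype=np.uint8)
key_points = np.random.rand(21, 2) * 300  # placeholder 21-point landmarks
aligned = align_face_3(im, key_points)    # dst_sz=(178, 218) aligned crop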
Project: Analog-Utility-Meter-Reader    Author: arjun372    | Project source | File source
def main():
    stream=urllib.urlopen(CAM_URL)
    bytes=''
    ts=time.time()
    while True:
        bytes+=stream.read(2048)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a==-1 or b==-1:
            continue

        # Frame available
        rtimestamp=time.time()
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        cv2.imshow('RAW',img)

        #ORB to get corresponding points
        kp, des = orb.detectAndCompute(img,None)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des_ref,des)
        matches = sorted(matches, key = lambda x:x.distance)
        img3 = cv2.drawMatches(img_ref,kp_ref,img,kp,matches[:4], None,flags=2)
        cv2.imshow('Matches',img3)

#        pts_src = np.float32([[kp_ref[0].pt[0],kp_ref[0].pt[1]],[kp_ref[1].pt[0],kp_ref[1].pt[1]],[kp_ref[0].pt[0],kp_ref[0].pt[1]],[kp_ref[0].pt[0],kp_ref[0].pt[1]]
        # Affine transform (hard-coded demo points)
        pts1 = np.float32([[50,50],[200,50],[50,200]])
        pts2 = np.float32([[10,100],[200,50],[100,250]])
        Tr_M = cv2.getAffineTransform(pts1,pts2)
        oimg = cv2.warpAffine(img,Tr_M,(cols,rows))
        cv2.imshow('Perspective Transform',oimg)

        # Print lag
        print(time.time()-ts)
        ts=time.time()

        if cv2.waitKey(1) == 27:
            exit(0)
Project: faceNet_RealTime    Author: jack55436001    | Project source | File source
def align(self, imgDim, rgbImg, bb=None,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              skipMulti=False, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: AlphaLogo    Author: gigaflw    | Project source | File source
def testset_generator(im):
    """
    Cast a variation of transform onto `im`, includes
            1. scale
            2. rotate
            3. affine
            4. gaussian blur
            5. reflect
    @yield: a tri-tuple
        (<name_of_the_transform>, <image_array>, <validation_function>)
        where <validation_function> is a function,
            receive a position (row, col), and return the new position after transform
    """
    def inner(m):
        return lambda r, c: np.dot(m, [c, r, 1])[::-1]

    rows, cols = im.shape

    # resize
    yield "scale1", cv2.resize(im, None, fx=0.5, fy=0.5), lambda r, c: (r/2, c/2)
    yield "scale2", cv2.resize(im, None, fx=2, fy=2), lambda r, c: (r*2, c*2)

    # rotate
    r_m1 = cv2.getRotationMatrix2D((cols/2, rows/2), -30, 1)
    yield "rotate_30", cv2.warpAffine(im, r_m1, (cols, rows)), inner(r_m1)

    r_m2 = cv2.getRotationMatrix2D((cols/2, rows/2), -45, 1)
    yield "rotate_45", cv2.warpAffine(im, r_m2, (cols, rows)), inner(r_m2)

    r_m3 = cv2.getRotationMatrix2D((cols/2, rows/2), -90, 1)
    yield "rotate_90", cv2.warpAffine(im, r_m3, (cols, rows)), inner(r_m3)

    # r_m4 = cv2.getRotationMatrix2D((cols/2, rows/2), -30, 0.7)
    # yield "rotate_30_scale_0.7", cv2.warpAffine(im, r_m4, (cols, rows)), inner(r_m4)    

    # r_m5 = cv2.getRotationMatrix2D((cols/2, rows/2), -30, 0.5)
    # yield "rotate_30_scale_0.5", cv2.warpAffine(im, r_m5, (cols, rows)), inner(r_m5)    

    # affine
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], dtype=np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], dtype=np.float32)
    a_m = cv2.getAffineTransform(pts1, pts2)
    yield "affine", cv2.warpAffine(im, a_m, (cols, rows)), inner(a_m)

    # blur
    # yield "blur_11", cv2.GaussianBlur(im, (11, 11), 0), lambda r,c: (r,c)
    # yield "blur_31", cv2.GaussianBlur(im, (31, 31), 0), lambda r,c: (r,c)

    # reflect
    yield "reflect", im[:,::-1], lambda r,c : (r, cols-c)
Project: icyface_api    Author: bupticybee    | Project source | File source
def align(self, imgDim, rgbImg, bb=None,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              skipMulti=False, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: imgProcessor    Author: radjkarl    | Project source | File source
def alignImageAlongLine(img, line, height=15, length=None,
                        zoom=1, fast=False, borderValue=0):
    '''
    return a sub image aligned along given line

    @param img -  numpy.2darray input image to get subimage from
    @param line - list of 2 points [x0,y0,x1,y1])
    @param height - height of output array in y
    @param length - width of output array
    @param zoom - zoom factor
    @param fast - speed up calculation using nearest neighbour interpolation
    @returns transformed image as numpy.2darray with found line as in the middle
    '''

    height = int(round(height))
    if height % 2 == 0:  # ->is even number
        height += 1  # only take uneven numbers to have line in middle
    if length is None:
        length = int(round(ln.length(line)))
    hh = (height - 1)
    ll = (length - 1)

    # end points of the line:
    p0 = np.array(line[0:2], dtype=float)
    p1 = np.array(line[2:], dtype=float)
    # p2 is above middle of p0,p1:
    norm = np.array(ln.normal(line))
    if not ln.isHoriz(line):
        norm *= -1

    p2 = (p0 + p1) * 0.5 + norm * hh * 0.5
    middleY = hh / 2
    pp0 = [0, middleY]
    pp1 = [ll, middleY]
    pp2 = [ll * 0.5, hh]

    pts1 = np.array([p0, p1, p2], dtype=np.float32)
    pts2 = np.array([pp0, pp1, pp2], dtype=np.float32)

    if zoom != 1:
        length = int(round(length * zoom))
        height = int(round(height * zoom))
        pts2 *= zoom

    # TRANSFORM:
    M = cv2.getAffineTransform(pts1, pts2)
    dst = cv2.warpAffine(
        img, M, (length, height),
        flags=cv2.INTER_NEAREST if fast else cv2.INTER_LINEAR,
        borderValue=borderValue)
    return dst
Project: dataArtist    Author: radjkarl    | Project source | File source
def _process(self):
        w = self.display.widget
        img = w.image
        out = []
        v = self.pRef.value()

        if v == 'Reference points':
            # 2x3 point warp:
            r0, r1 = self._refPn
            pts0 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r0.handles]) + r0.pos()
            pts1 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r1.handles]) + r1.pos()
            # TODO: embed in PerspectiveCorrection
            M = cv2.getAffineTransform(
                pts0.astype(
                    np.float32), pts1.astype(
                    np.float32))
            for n, i in enumerate(img):
                out.append(
                    # TODO: allow different image shapes
                    cv2.warpAffine(i, M, w.image.shape[1:3],
                                   borderValue=0))
        else:
            r = v == 'Reference image'
            e = self.pExecOn.value()
            for n, i in enumerate(img):
                if (e == 'all images' or
                        (e == 'current image' and n == w.currentIndex) or
                        (e == 'last image' and n == len(img) - 1)):
                    if not (r and n == self._refImg_from_own_display):
                        corr = self.pc.correct(i)

#                         if r and self.pSubPx.value():
#                             corr = subPixelAlignment(
#                                 corr, self._refImg,
#                                 niter=20,
#                                 grid=(self.pSubPx_y.value(),
#                                       self.pSubPx_x.value()),
#                                 method='smooth',
#                                 # maxGrad=2,
#                                 concentrateNNeighbours=self.pSubPx_neigh.value(),
#                                 maxDev=self.pSubPx_maxDev.value())[0]

                        out.append(corr)
        return out
Project: dataArtist    Author: radjkarl    | Project source | File source
def _process(self):
        w = self.display.widget
        img = w.image
        out = []
        v = self.pRef.value()

        if v == 'Reference points':
            # 2x3 point warp:
            r0, r1 = self._refPn
            pts0 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r0.handles]) + r0.pos()
            pts1 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r1.handles]) + r1.pos()
            # TODO: embed in PerspectiveCorrection
            M = cv2.getAffineTransform(
                pts0.astype(
                    np.float32), pts1.astype(
                    np.float32))
            for n, i in enumerate(img):
                out.append(
                    # TODO: allow different image shapes
                    cv2.warpAffine(i, M, w.image.shape[1:3],
                                   borderValue=0))
        else:
            r = v == 'Reference image'
            e = self.pExecOn.value()
            for n, i in enumerate(img):
                if (e == 'all images' or
                        (e == 'current image' and n == w.currentIndex) or
                        (e == 'last image' and n == len(img) - 1)):
                    if not (r and n == self._refImg_from_own_display):
                        corr = self.pc.correct(i)

                        if r and self.pSubPx.value():
                            corr = subPixelAlignment(
                                corr, self._refImg,
                                niter=20,
                                grid=(self.pSubPx_y.value(),
                                      self.pSubPx_x.value()),
                                method='smooth',
                                # maxGrad=2,
                                concentrateNNeighbours=self.pSubPx_neigh.value(),
                                maxDev=self.pSubPx_maxDev.value())[0]

                        out.append(corr)
        return out
Project: facenet    Author: davidsandberg    | Project source | File source
def align(self, imgDim, rgbImg, bb=None,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              skipMulti=False, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: EyesInTheSky    Author: SherineSameh    | Project source | File source
def align(self, imgDim, rgbImg, bb=None,landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP):
        """align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """



        # raise an assertion error if any of the following arguments is
        # not passed to the function
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices])
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: uct_atari    Author: 5vision    | Project source | File source
def augment_image(image):

    # move channel to the last axis
    image = np.rollaxis(image, 0, 3)
    h, w, ch = image.shape[:3]

    # brightness
    brightness = random.uniform(-0.1, 0.1)

    # rotation and scaling
    rot = 1
    scale = 0.01
    Mrot = cv2.getRotationMatrix2D((h / 2, w / 2), random.uniform(-rot, rot), random.uniform(1.0 - scale, 1.0 + scale))

    # affine transform and shifts
    pts1 = np.float32([[0, 0], [w, 0], [w, h]])
    a = 1
    shift = 1
    shiftx = random.randint(-shift, shift)
    shifty = random.randint(-shift, shift)
    pts2 = np.float32([[
        0 + random.randint(-a, a) + shiftx,
        0 + random.randint(-a, a) + shifty
    ], [
        w + random.randint(-a, a) + shiftx,
        0 + random.randint(-a, a) + shifty
    ], [
        w + random.randint(-a, a) + shiftx,
        h + random.randint(-a, a) + shifty
    ]])
    M = cv2.getAffineTransform(pts1, pts2)

    def _augment(image):
        image = np.add(image, brightness)

        augmented = cv2.warpAffine(
            cv2.warpAffine(
                image
                , Mrot, (w, h)
            )
            , M, (w, h)
        )

        if augmented.ndim < 3:
            augmented = np.expand_dims(augmented, 2)

        return augmented

    # make same transform for each channel, splitting image by four channels
    image_lst = [image[..., i:i+4] for i in xrange(0, ch, 4)]
    augmented_lst = map(_augment, image_lst)
    augmented = np.concatenate(augmented_lst, axis=-1)

    # roll channel axis back when returning
    augmented = np.rollaxis(augmented, 2, 0)

    return augmented
Project: real-time-face-recognition    Author: iwantooxxoox    | Project source | File source
def align(self, imgDim, rgbImg, bb=None,
              landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              skipMulti=False, scale=1.0):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :param skipMulti: Skip image if more than one face detected.
        :type skipMulti: bool
        :param scale: Scale image before cropping to the size given by imgDim.
        :type scale: float
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            bb = self.getLargestFaceBoundingBox(rgbImg, skipMulti)
            if bb is None:
                return

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        #pylint: disable=maybe-no-member
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                   imgDim * MINMAX_TEMPLATE[npLandmarkIndices]*scale + imgDim*(1-scale)/2)
        thumbnail = cv2.warpAffine(rgbImg, H, (imgDim, imgDim))

        return thumbnail
Project: FaceRecognition    Author: fonfonx    | Project source | File source
def warp_image(img, triangulation, base_points, coord):
    """
    Realize the mesh warping phase

    triangulation is the Delaunay triangulation of the base points
    base_points are the coordinates of the landmark poitns of the reference image

    code inspired from http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for t in triangulation:
        # triangles to map one another
        src_tri = np.array([[all_points[x][0], all_points[x][1]] for x in t]).astype(np.float32)
        dest_tri = np.array([[base_points[x][0], base_points[x][1]] for x in t]).astype(np.float32)
        # bounding boxes
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))

        # crop images
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]

        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]

        # affine transformation estimation
        mat = cv2.getAffineTransform(
            np.float32(src_crop_tri),
            np.float32(dest_crop_tri)
        )
        dest_crop_img = cv2.warpAffine(
            src_crop_img,
            mat,
            (dest_rect[2], dest_rect[3]),
            None,
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REFLECT_101
        )

        # Use a mask to keep only the triangle pixels
        # Get mask by filling triangle
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)

        # Apply mask to cropped region
        dest_crop_img = dest_crop_img * mask

        # Copy triangular region of the rectangular patch to the output image
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] * (
                (1.0, 1.0, 1.0) - mask)

        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] + dest_crop_img

    return img_out[coord[2]:coord[3], coord[0]:coord[1]]