Python cv2 module, BORDER_TRANSPARENT example source code

We extracted the following 14 code examples from open-source Python projects to illustrate how to use cv2.BORDER_TRANSPARENT.
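cv2.BORDER_TRANSPARENT tells cv2.warpAffine / cv2.warpPerspective not to touch destination pixels whose source coordinates fall outside the input image, so whatever already sits in the pre-allocated dst array shows through; this is what lets the snippets below paste a warped face onto an existing canvas. A minimal sketch of that behaviour (the sizes and values here are arbitrary, for illustration only):

import cv2
import numpy as np

# A grey "background" canvas that will receive the warped patch.
background = np.full((200, 200, 3), 128, dtype=np.uint8)

# A small white patch, pasted in with a pure translation (2x3 affine matrix).
patch = np.full((50, 50, 3), 255, dtype=np.uint8)
M = np.float32([[1, 0, 60],
                [0, 1, 40]])

# With borderMode=cv2.BORDER_TRANSPARENT the pixels of `background` that lie
# outside the warped patch keep their original value instead of being
# overwritten with a border constant.
cv2.warpAffine(patch, M, (200, 200), dst=background,
               borderMode=cv2.BORDER_TRANSPARENT)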

Project: FaceSwap    Author: Aravind-Suresh
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask
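Unlike the commented-out cv2.warpAffine call above, cv2.warpPerspective takes the full 3x3 matrix tM, and the cv2.WARP_INVERSE_MAP flag makes OpenCV interpret that matrix as a destination-to-source mapping, so an already-inverted alignment matrix can be passed directly. A small self-contained check of that equivalence (the image and matrix below are dummies, not FaceSwap's data):

import cv2
import numpy as np

img = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)   # dummy source image
tM = np.array([[1.0, 0.0, -10.0],                                 # dummy 3x3 transform
               [0.0, 1.0,  -5.0],
               [0.0, 0.0,   1.0]])
dsize = (140, 90)                                                  # (width, height)

# With WARP_INVERSE_MAP, tM is read as dst -> src; passing its explicit
# inverse without the flag is the equivalent forward form.
out_a = cv2.warpPerspective(img, tM, dsize,
                            flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
out_b = cv2.warpPerspective(img, np.linalg.inv(tM), dsize,
                            flags=cv2.INTER_LINEAR)
print(int(np.abs(out_a.astype(int) - out_b.astype(int)).max()))   # 0 for this pure translation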
Project: FaceSwapper    Author: QuantumLiu
def warp_im(self, im, M, dshape):
        '''
        Warp `im` with the affine matrix `M` into an image of shape `dshape`.
        '''
        output_im = np.zeros(dshape, dtype=im.dtype)
        cv2.warpAffine(im,
                       M[:2],
                       (dshape[1], dshape[0]),
                       dst=output_im,
                       borderMode=cv2.BORDER_TRANSPARENT,
                       flags=cv2.WARP_INVERSE_MAP)
        return output_im
Project: masks-and-hats    Author: leoneckert
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: masks-and-hats    Author: leoneckert
def warp_im(im, M, dshape):
    output_im = np.ones(dshape, dtype=im.dtype)*0  # same as np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: Kutils    Author: ishank26
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: Automatic_Group_Photography_Enhancement    Author: Yuliang-Zou
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: photo-a-day-aligner    Author: matthewearl
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: mask-generator    Author: antiboredom
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: faceSwapPython    Author: arijitx
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: facemash-workshop    Author: leoneckert
def warp_im(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: facemash-workshop    Author: leoneckert
def warp_im(im, M, dshape):
    output_im = np.ones(dshape, dtype=im.dtype)*255  # white canvas: pixels outside the warp stay at 255
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: SwitchFace    Author: messcode
def warp_im(im, M, dshape):
    """
    Affine transformation with matrix M to dshape.
    """
    output_im = numpy.zeros(dshape, dtype=im.dtype)  # zero matrix
    cv2.warpAffine(im,
                   M[:2], # shape of M
                   (dshape[1], dshape[0]),
                   dst = output_im,
                   borderMode = cv2.BORDER_TRANSPARENT,
                   flags = cv2.WARP_INVERSE_MAP)
    return output_im
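The M[:2] slice that appears in every snippet exists because the alignment matrix is usually stored as a 3x3 homogeneous transform, while cv2.warpAffine expects a 2x3 matrix; slicing off the constant bottom row [0, 0, 1] leaves exactly the affine part. A tiny illustration (the numbers are arbitrary):

import numpy as np

# 2x3 affine block: rotation/scale on the left, translation on the right.
A = np.array([[0.9, -0.1, 12.0],
              [0.1,  0.9, -7.0]])

# The same transform in 3x3 homogeneous form, as a typical alignment step returns it.
M = np.vstack([A, [0.0, 0.0, 1.0]])

# cv2.warpAffine wants the 2x3 part only, hence the M[:2] in the functions above.
assert np.allclose(M[:2], A)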
Project: deezer-album-face-swap    Author: xbenji
def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im
Project: imgProcessor    Author: radjkarl
def __next__(self):
        # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
        # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
        obj = MaskedMovingAverage(shape=self.obj_shape)

        with np.errstate(divide='ignore', invalid='ignore'):
            for f, h in zip(self.fits, self.Hinvs):
                warpedflatField = cv2.warpPerspective(self.flatField,
                                                      h, (f.shape[1], f.shape[0]))
                obj.update(f / warpedflatField, warpedflatField != 0)

        self.object = obj.avg

        # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
        # BETWEEN ALL IMAGES AND THE ESTIMATED OBJECT
        sh = self.flatField.shape
        s = MaskedMovingAverage(shape=sh)

        for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
            div = f / self.object
            # ->do not interpolate between background and image border
            div[mask] = np.nan
            div = cv2.warpPerspective(div, h, (sh[1], sh[0]),  # borderMode=cv2.BORDER_TRANSPARENT
                                      )
            div = np.nan_to_num(div)
            s.update(div, div != 0)

        new_flatField = s.avg

        # STOP ITERATION?
        # RMSE excluding NaNs:
        dev = np.nanmean((new_flatField[::10, ::10] -
                          self.flatField[::10, ::10])**2)**0.5
        print('residuum: %s' % dev)
        if self.n >= self.maxIter or (self._last_dev and (
                (self.n > 4 and dev > self._last_dev) or
                dev < self.maxDev)):
            raise StopIteration

        # remove erroneous values:
        self.flatField = np.clip(new_flatField, 0, 1)

        self.n += 1
        self._last_dev = dev
        return self.n
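The imgProcessor snippet relies on a MaskedMovingAverage helper that is not shown here. As a rough idea of what a mask-gated running average can look like, here is a hypothetical stand-in (an assumption for illustration, not the project's actual implementation):

import numpy as np

class MaskedMovingAverageSketch:
    # Hypothetical stand-in: per-pixel running mean, updated only where the mask is True.
    def __init__(self, shape):
        self._sum = np.zeros(shape, dtype=float)
        self._count = np.zeros(shape, dtype=float)

    def update(self, values, mask):
        # Accumulate only the pixels selected by the boolean mask.
        self._sum[mask] += values[mask]
        self._count[mask] += 1

    @property
    def avg(self):
        # Pixels that were never updated stay at 0 instead of dividing by zero.
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.where(self._count > 0, self._sum / self._count, 0.0)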