Python cv2 module: BORDER_REFLECT_101 example source code

The following 19 code examples, extracted from open-source Python projects, illustrate how to use cv2.BORDER_REFLECT_101.
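
As a quick orientation before the project examples, here is a minimal standalone sketch (not taken from any of the projects below): BORDER_REFLECT_101 mirrors around the edge pixel without repeating it (gfedcb|abcdefgh|gfedcba), whereas BORDER_REFLECT repeats it (fedcba|abcdefgh|hgfedcb).

import cv2
import numpy as np

# A 1x5 single-channel "image" makes the padding pattern easy to read.
row = np.array([[1, 2, 3, 4, 5]], dtype=np.uint8)

# BORDER_REFLECT_101: mirror around the edge pixel, edge not repeated.
print(cv2.copyMakeBorder(row, 0, 0, 3, 3, cv2.BORDER_REFLECT_101))
# [[4 3 2 1 2 3 4 5 4 3 2]]

# BORDER_REFLECT: mirror including the edge pixel.
print(cv2.copyMakeBorder(row, 0, 0, 3, 3, cv2.BORDER_REFLECT))
# [[3 2 1 1 2 3 4 5 5 4 3]]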

Project: convnet-for-geometric-matching | Author: hjweide
def crop_transform(img, params):

    # take the center crop of the original image as I_{A}
    crop = center_crop(img, 227)

    M, = generate_transformations(
        1, (img.shape[0], img.shape[1]), **params
    )

    # apply T_{\theta_{GT}} to I_{A} to get I_{B}
    warp = cv2.warpAffine(
        crop.astype(np.float32), M[:2], (crop.shape[1], crop.shape[0]),
        flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101
    )

    return crop, warp, M
Project: tensorflow-litterbox | Author: rwightman
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)

    M = cv2.getAffineTransform(pts1, pts2)
    distorted_image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE) #cv2.BORDER_REFLECT_101)

    return distorted_image
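
A minimal usage sketch for the snippet above (the test image and seeds are hypothetical; cv2 and numpy are assumed imported as in the snippet):

image = np.random.RandomState(0).randint(0, 256, (256, 256, 3), dtype=np.uint8)
warped = distort_affine_cv2(image, alpha_affine=10,
                            random_state=np.random.RandomState(42))
assert warped.shape == image.shape  # warpAffine keeps the input size here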
Project: tensorflow-litterbox | Author: rwightman
def distort_elastic_cv2(image, alpha=80, sigma=20, random_state=None):
    """Elastic deformation of images as per [Simard2003].
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape_size = image.shape[:2]

    # Downscaling the random grid and then upsizing post filter
    # improves performance. Approx 3x for scale of 4, diminishing returns after.
    grid_scale = 4
    alpha //= grid_scale  # scaling alpha and sigma along with the grid gives an
    sigma //= grid_scale  # end result closer to the un-scaled version
    grid_shape = (shape_size[0]//grid_scale, shape_size[1]//grid_scale)

    blur_size = int(4 * sigma) | 1  # force an odd kernel size
    rand_x = cv2.GaussianBlur(
        (random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
        ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
    rand_y = cv2.GaussianBlur(
        (random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
        ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
    if grid_scale > 1:
        rand_x = cv2.resize(rand_x, shape_size[::-1])
        rand_y = cv2.resize(rand_y, shape_size[::-1])

    grid_x, grid_y = np.meshgrid(np.arange(shape_size[1]), np.arange(shape_size[0]))
    grid_x = (grid_x + rand_x).astype(np.float32)
    grid_y = (grid_y + rand_y).astype(np.float32)

    distorted_img = cv2.remap(image, grid_x, grid_y,
        borderMode=cv2.BORDER_REFLECT_101, interpolation=cv2.INTER_LINEAR)

    return distorted_img
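
A usage sketch under the same assumptions (hypothetical test image; alpha and sigma are kept as integers so the in-place floor divisions behave as intended):

image = (np.random.RandomState(0).rand(128, 128) * 255).astype(np.uint8)
distorted = distort_elastic_cv2(image, alpha=80, sigma=20,
                                random_state=np.random.RandomState(1))
assert distorted.shape == image.shape  # remap writes a same-sized output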
Project: kaggle_amazon_from_space | Author: N01Z3
def randomShiftScale(img, u=0.25, limit=4):
    if random.random() < u:
        height, width, channel = img.shape
        assert (width == height)
        size0 = width
        size1 = width + 2 * limit
        img1 = cv2.copyMakeBorder(img, limit, limit, limit, limit, borderType=cv2.BORDER_REFLECT_101)
        size = round(random.uniform(size0, size1))

        dx = round(random.uniform(0, size1 - size))  # pixel
        dy = round(random.uniform(0, size1 - size))

        y1 = dy
        y2 = y1 + size
        x1 = dx
        x2 = x1 + size

        if size == size0:
            img = img1[y1:y2, x1:x2, :]
        else:
            img = cv2.resize(img1[y1:y2, x1:x2, :], (size0, size0), interpolation=cv2.INTER_LINEAR)

    return img
Project: kaggle_amazon_from_space | Author: N01Z3
def randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1, rotate_limit=45, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape

        angle = random.uniform(-rotate_limit, rotate_limit)  # degree
        scale = random.uniform(1 - scale_limit, 1 + scale_limit)
        dx = round(random.uniform(-shift_limit, shift_limit) * width)  # pixel; round after scaling, or the shift is always 0
        dy = round(random.uniform(-shift_limit, shift_limit) * height)

        cc = math.cos(angle / 180 * math.pi) * (scale)
        ss = math.sin(angle / 180 * math.pi) * (scale)
        rotate_matrix = np.array([[cc, -ss], [ss, cc]])

        box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
        box1 = box0 - np.array([width / 2, height / 2])
        box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])

        box0 = box0.astype(np.float32)
        box1 = box1.astype(np.float32)
        mat = cv2.getPerspectiveTransform(box0, box1)
        img = cv2.warpPerspective(img, mat, (width, height), flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_REFLECT_101)  # cv2.BORDER_CONSTANT, borderValue = (0, 0, 0))  #cv2.BORDER_REFLECT_101

    return img
Project: kaggle_amazon_from_space | Author: N01Z3
def randomDistort1(img, distort_limit=0.35, shift_limit=0.25, u=0.5):
    if random.random() < u:
        height, width, channel = img.shape

        # debug
        # img = img.copy()
        # for x in range(0,width,10):
        #     cv2.line(img,(x,0),(x,height),(1,1,1),1)
        # for y in range(0,height,10):
        #     cv2.line(img,(0,y),(width,y),(1,1,1),1)

        k = random.uniform(-distort_limit, distort_limit) * 0.00001
        dx = random.uniform(-shift_limit, shift_limit) * width
        dy = random.uniform(-shift_limit, shift_limit) * height

        # map_x, map_y = cv2.initUndistortRectifyMap(intrinsics, dist_coeffs, None, None, (width,height),cv2.CV_32FC1)
        # https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
        # https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
        # build the coordinate grids in (height, width) order so remap does not transpose the image
        y, x = np.mgrid[0:height:1, 0:width:1]
        x = x.astype(np.float32) - width / 2 - dx
        y = y.astype(np.float32) - height / 2 - dy
        theta = np.arctan2(y, x)
        d = (x * x + y * y) ** 0.5
        r = d * (1 + k * d * d)
        map_x = r * np.cos(theta) + width / 2 + dx
        map_y = r * np.sin(theta) + height / 2 + dy

        img = cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return img
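
The kaggle_amazon_from_space augmenters above compose into a per-sample pipeline; a hedged sketch (the wrapper is hypothetical, and the square inputs the snippets expect are assumed):

def random_augment(img):
    # Each stage fires with its own probability u and reflects at the borders.
    img = randomShiftScale(img, u=0.25, limit=4)
    img = randomShiftScaleRotate(img, shift_limit=0.0625, scale_limit=0.1,
                                 rotate_limit=45, u=0.5)
    img = randomDistort1(img, distort_limit=0.35, shift_limit=0.25, u=0.5)
    return img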


Project: inyourface | Author: yacomink
def applyAffineTransform(self, src, srcTri, dstTri, size) :

        # Given a pair of triangles, find the affine transform.
        warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

        # Apply the Affine Transform just found to the src image
        dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

        return dst


Project: SemiSupervised_itterativeCNN | Author: styloInt
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
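
A usage sketch for elastic_transform (hypothetical inputs; the snippet assumes scipy.ndimage provides gaussian_filter and map_coordinates):

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

image = np.random.RandomState(0).randint(0, 256, (96, 96, 3), dtype=np.uint8)
# A common heuristic ties the deformation parameters to the image side length.
side = image.shape[1]
warped = elastic_transform(image, alpha=side * 2, sigma=side * 0.08,
                           alpha_affine=side * 0.08,
                           random_state=np.random.RandomState(7))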
Project: opencv-helpers | Author: abarrak
def frame(image, top=2, bottom=2, left=2, right=2, borderType=cv.BORDER_CONSTANT, color=[255, 0, 0]):
  '''
  Add borders around :image:.
  :param image: has to be in RGB color scheme. Use `convert_to_rgb` if it's in OpenCV's BGR scheme.
  :param color: array representing an RGB color.
  :param borderType: Other options are:
                                    cv.BORDER_REFLECT,
                                    cv.BORDER_REFLECT_101,
                                    cv.BORDER_DEFAULT,
                                    cv.BORDER_REPLICATE,
                                    cv.BORDER_WRAP
  '''
  return cv.copyMakeBorder(image, top, bottom, left, right, borderType, value=color)
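
A quick sketch exercising frame with the default constant border and with BORDER_REFLECT_101 (hypothetical input; note that the snippet imports OpenCV as cv):

img = np.random.RandomState(0).randint(0, 256, (64, 64, 3), dtype=np.uint8)
framed_solid = frame(img, top=10, bottom=10, left=10, right=10)  # solid red frame
framed_mirror = frame(img, top=10, bottom=10, left=10, right=10,
                      borderType=cv.BORDER_REFLECT_101)  # mirrored content; value is ignored
assert framed_mirror.shape == (84, 84, 3)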
Project: FaceAnalysis | Author: ElliotSalisbury
def applyAffineTransform(src, srcTri, dstTri, dst) :
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    # Apply the Affine Transform just found to the src image

    dst = cv2.warpAffine(src, warpMat, (dst.shape[1], dst.shape[0]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)

    return dst

Project: catFaceSwapSendToTodd | Author: Micasou
def applyAffineTransform(src, srcTri, dstTri, size) :

    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

    return dst


Project: pokerface | Author: LiuRoy
def applyAffineTransform(src, srcTri, dstTri, size) :

    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )

    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )

    return dst


Project: convnet-for-geometric-matching | Author: hjweide
def center_crop(img, length):
    if img.shape[0] < length or img.shape[1] < length:
        top = max(0, int(np.ceil((length - img.shape[0]) / 2.)))
        left = max(0, int(np.ceil((length - img.shape[1]) / 2.)))
        img = cv2.copyMakeBorder(
            img, top, top, left, left, borderType=cv2.BORDER_REFLECT_101
        )

    crop_y = int(np.floor((img.shape[0] - length) / 2.))
    crop_x = int(np.floor((img.shape[1] - length) / 2.))
    crop = img[crop_y:crop_y + length, crop_x:crop_x + length]

    return crop
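
When the input is smaller than the requested side, center_crop mirror-pads with BORDER_REFLECT_101 before cropping; a sketch with a hypothetical undersized image:

small = np.random.RandomState(0).randint(0, 256, (100, 150, 3), dtype=np.uint8)
crop = center_crop(small, 227)
assert crop.shape[:2] == (227, 227)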
Project: SkinLesionNeuralNetwork | Author: Neurality
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
Project: kaggle_amazon_from_space | Author: N01Z3
def randomRotate(img, u=0.25, limit=90):
    if random.random() < u:
        angle = random.uniform(-limit, limit)  # degree

        height, width = img.shape[0:2]
        mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
        img = cv2.warpAffine(img, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)  # dsize is (width, height)
        # img = cv2.warpAffine(img, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

    return img
Project: kaggle_amazon_from_space | Author: N01Z3
def randomShift(img, u=0.25, limit=4):
    if random.random() < u:
        dx = round(random.uniform(-limit, limit))  # pixel
        dy = round(random.uniform(-limit, limit))  # pixel

        height, width, channel = img.shape
        img1 = cv2.copyMakeBorder(img, limit + 1, limit + 1, limit + 1, limit + 1, borderType=cv2.BORDER_REFLECT_101)
        y1 = limit + 1 + dy
        y2 = y1 + height
        x1 = limit + 1 + dx
        x2 = x1 + width
        img = img1[y1:y2, x1:x2, :]

    return img
Project: dilation | Author: fyu
def predict(dataset_name, input_path, output_path):
    dataset = Dataset(dataset_name)
    net = caffe.Net(dataset.model_path, dataset.pretrained_path, caffe.TEST)
    label_margin = 186

    input_dims = net.blobs['data'].shape
    batch_size, num_channels, input_height, input_width = input_dims
    caffe_in = np.zeros(input_dims, dtype=np.float32)
    image = cv2.imread(input_path, 1).astype(np.float32) - dataset.mean_pixel

    image_size = image.shape
    output_height = input_height - 2 * label_margin
    output_width = input_width - 2 * label_margin
    image = cv2.copyMakeBorder(image, label_margin, label_margin,
                               label_margin, label_margin,
                               cv2.BORDER_REFLECT_101)

    num_tiles_h = image_size[0] // output_height + \
                  (1 if image_size[0] % output_height else 0)
    num_tiles_w = image_size[1] // output_width + \
                  (1 if image_size[1] % output_width else 0)

    prediction = []
    for h in range(num_tiles_h):
        col_prediction = []
        for w in range(num_tiles_w):
            offset = [output_height * h,
                      output_width * w]
            tile = image[offset[0]:offset[0] + input_height,
                         offset[1]:offset[1] + input_width, :]
            margin = [0, input_height - tile.shape[0],
                      0, input_width - tile.shape[1]]
            tile = cv2.copyMakeBorder(tile, margin[0], margin[1],
                                      margin[2], margin[3],
                                      cv2.BORDER_REFLECT_101)
            caffe_in[0] = tile.transpose([2, 0, 1])
            out = net.forward_all(**{net.inputs[0]: caffe_in})
            prob = out['prob'][0]
            col_prediction.append(prob)
        # print('concat row')
        col_prediction = np.concatenate(col_prediction, axis=2)
        prediction.append(col_prediction)
    prob = np.concatenate(prediction, axis=1)
    if dataset.zoom > 1:
        prob = util.interp_map(prob, dataset.zoom, image_size[1], image_size[0])
    prediction = np.argmax(prob.transpose([1, 2, 0]), axis=2)
    color_image = dataset.palette[prediction.ravel()].reshape(image_size)
    color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
    print('Writing', output_path)
    cv2.imwrite(output_path, color_image)
Project: dilation-tensorflow | Author: ndrplz
def predict(image, input_tensor, model, ds, sess):

    image = image.astype(np.float32) - CONFIG[ds]['mean_pixel']
    conv_margin = CONFIG[ds]['conv_margin']

    input_dims = (1,) + CONFIG[ds]['input_shape']
    batch_size, input_height, input_width, num_channels = input_dims
    model_in = np.zeros(input_dims, dtype=np.float32)

    image_size = image.shape
    output_height = input_height - 2 * conv_margin
    output_width = input_width - 2 * conv_margin
    image = cv2.copyMakeBorder(image, conv_margin, conv_margin,
                               conv_margin, conv_margin,
                               cv2.BORDER_REFLECT_101)

    num_tiles_h = image_size[0] // output_height + (1 if image_size[0] % output_height else 0)
    num_tiles_w = image_size[1] // output_width  + (1 if image_size[1] % output_width else 0)

    row_prediction = []
    for h in range(num_tiles_h):
        col_prediction = []
        for w in range(num_tiles_w):
            offset = [output_height * h,
                      output_width * w]
            tile = image[offset[0]:offset[0] + input_height,
                         offset[1]:offset[1] + input_width, :]
            margin = [0, input_height - tile.shape[0],
                      0, input_width - tile.shape[1]]
            tile = cv2.copyMakeBorder(tile, margin[0], margin[1],
                                      margin[2], margin[3],
                                      cv2.BORDER_REFLECT_101)

            model_in[0] = tile  # kept for parity with the Caffe version; the feed below passes the tile directly

            prob = sess.run(model, feed_dict={input_tensor: tile[None, ...]})[0]

            col_prediction.append(prob)

        col_prediction = np.concatenate(col_prediction, axis=1)  # previously axis=2
        row_prediction.append(col_prediction)
    prob = np.concatenate(row_prediction, axis=0)
    if CONFIG[ds]['zoom'] > 1:
        prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1], image_size[0])

    prediction = np.argmax(prob, axis=2)
    color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)

    return color_image
Project: FaceRecognition | Author: fonfonx
def warp_image(img, triangulation, base_points, coord):
    """
    Perform the mesh warping phase.

    triangulation is the Delaunay triangulation of the base points;
    base_points are the coordinates of the landmark points of the reference image.

    Code inspired by http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for t in triangulation:
        # triangles to map one another
        src_tri = np.array([[all_points[x][0], all_points[x][1]] for x in t]).astype(np.float32)
        dest_tri = np.array([[base_points[x][0], base_points[x][1]] for x in t]).astype(np.float32)
        # bounding boxes
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))

        # crop images
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]

        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]

        # affine transformation estimation
        mat = cv2.getAffineTransform(
            np.float32(src_crop_tri),
            np.float32(dest_crop_tri)
        )
        dest_crop_img = cv2.warpAffine(
            src_crop_img,
            mat,
            (dest_rect[2], dest_rect[3]),
            None,
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REFLECT_101
        )

        # Use a mask to keep only the triangle pixels
        # Get mask by filling triangle
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)

        # Apply mask to cropped region
        dest_crop_img = dest_crop_img * mask

        # Copy triangular region of the rectangular patch to the output image
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] * (
                (1.0, 1.0, 1.0) - mask)

        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] + dest_crop_img

    return img_out[coord[2]:coord[3], coord[0]:coord[1]]