Python cv2 module: getRotationMatrix2D() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.getRotationMatrix2D().
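Before the project excerpts, here is a minimal, self-contained usage sketch (not taken from any of the projects below; "input.jpg" and "rotated.jpg" are placeholder file names). cv2.getRotationMatrix2D(center, angle, scale) returns a 2x3 affine matrix that rotates by `angle` degrees counter-clockwise about `center`, which cv2.warpAffine then applies:

import cv2

img = cv2.imread("input.jpg")
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), 30, 1.0)   # rotate 30 degrees about the image centre
rotated = cv2.warpAffine(img, M, (w, h))               # same canvas size, so corners may be clipped
cv2.imwrite("rotated.jpg", rotated)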

Project: human-pose-estimation-by-deep-learning    Author: HYPJUDY
def _random_roate(self, images, labels, degree):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        degree = degree * math.pi / 180
        rand_degree = np.random.uniform(-degree, degree, images.shape[0])

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)
        for idx in xrange(images.shape[0]):
            theta = rand_degree[idx]

            # labels
            for ii in xrange(self.points_num):
                o_labels[idx, 2*ii: 2*ii+2] = self._rotate(labels[idx, ii*2: 2*ii+2], theta)

            # image
            M = cv2.getRotationMatrix2D((self.img_width/2,self.img_height/2),-theta*180/math.pi,1)
            o_images[idx] = np.expand_dims(cv2.warpAffine(images[idx],M,(self.img_width,self.img_height)), axis=2)

        return o_images, o_labels
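The self._rotate helper used above is not part of this excerpt. A hypothetical sketch of what such a helper could look like — rotating a single (x, y) label about the image centre so it follows the warp applied to the image — is shown below; the name, signature, and sign convention are assumptions and may not match the original project:

def _rotate(self, point, theta):
    # Hypothetical helper (not the project's code): rotate one (x, y) label by
    # theta radians about the image centre, mirroring the forward mapping of the
    # image warp above (which uses angle -theta*180/pi in degrees).
    cx, cy = self.img_width / 2.0, self.img_height / 2.0
    dx, dy = point[0] - cx, point[1] - cy
    return np.array([cx + dx * math.cos(theta) - dy * math.sin(theta),
                     cy + dx * math.sin(theta) + dy * math.cos(theta)])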
Project: Farmbot_GeneralAP    Author: SpongeYao
def rotate_image(mat, angle):
    height, width = mat.shape[:2]
    image_center = (width / 2, height / 2)

    rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1)

    radians = math.radians(angle)
    sin = math.sin(radians)
    cos = math.cos(radians)
    bound_w = int((height * abs(sin)) + (width * abs(cos)))
    bound_h = int((height * abs(cos)) + (width * abs(sin)))

    rotation_mat[0, 2] += ((bound_w / 2) - image_center[0])
    rotation_mat[1, 2] += ((bound_h / 2) - image_center[1])

    rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
    return rotated_mat
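A possible usage sketch (file names are placeholders): because the output canvas is enlarged to the rotated bounding box and the matrix translation is adjusted accordingly, no corners are clipped:

img = cv2.imread("plant.jpg")
rotated = rotate_image(img, 30)      # 30 degrees counter-clockwise, on a larger canvas
cv2.imwrite("plant_rot30.jpg", rotated)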
Project: human-pose-estimation-by-deep-learning    Author: HYPJUDY
def _batch_random_roate(self, images, labels, degree):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        degree = degree * math.pi / 180
        rand_degree = np.random.uniform(-degree, degree)

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)
        for idx in xrange(images.shape[0]):
            theta = rand_degree

            # labels
            for ii in xrange(self.points_num):
                o_labels[idx, 2*ii: 2*ii+2] = self._rotate(labels[idx, ii*2: 2*ii+2], theta)

            # image
            M = cv2.getRotationMatrix2D((self.img_width/2,self.img_height/2),-theta*180/math.pi,1)
            o_images[idx] = np.expand_dims(cv2.warpAffine(images[idx],M,(self.img_width,self.img_height)), axis=2)

        return o_images, o_labels
Project: SemiSupervised_itterativeCNN    Author: styloInt
def data_augmentation(im, label):
    rotatation_angle = [-20, -10, 0, 10, 20]
    translate_x = [-15, -10, 0, 10, 15]
    translate_y = [-15, -10, 0, 10, 15]

    angle = random.choice(rotatation_angle)
    tx = random.choice(translate_x)
    ty = random.choice(translate_y)

    rows, cols = im.shape
    M_rotate = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)

    M_translate = np.float32([[1,0,tx],[0,1,ty]])
    im = cv2.warpAffine(im, M_translate,(cols,rows))
    label = cv2.warpAffine(label,M_translate,(cols,rows))

    im = cv2.warpAffine(im,M_rotate,(cols,rows))
    label = cv2.warpAffine(label, M_rotate,(cols,rows))

    return im, label
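A possible usage sketch, assuming a single-channel image and a segmentation mask of the same size (im.shape must unpack to two values; file names are placeholders). The same random rotation and translation matrices are applied to both arrays, so image and label stay aligned:

im = cv2.imread("slice.png", cv2.IMREAD_GRAYSCALE)
label = cv2.imread("slice_mask.png", cv2.IMREAD_GRAYSCALE)
im_aug, label_aug = data_augmentation(im, label)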
Project: CVtools    Author: Tyler-D
def augmentate(self):
        angles = [45, 90, 135, 180, 225, 270, 315]
        scale = 1.0
        for img in self.images:
            print "image shape : ", img.shape
            w = img.shape[1]
            h = img.shape[0]
            img_vmirror = cv2.flip(img,1)
            skimage.io.imsave("testv"+".jpg", img_vmirror )
            for angle in angles:
            #rangle = np.deg2rad(angle)
            # nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale
            # nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale
                rot_mat = cv2.getRotationMatrix2D((w*0.5, h*0.5), angle, scale)
            # rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
            # rot_mat[0,2] += rot_move[0]
            # rot_mat[1,2] += rot_move[1]
                new_img = cv2.warpAffine(img, rot_mat, (int(math.ceil(w)), int(math.ceil(h))), flags=cv2.INTER_LANCZOS4)
                skimage.io.imsave("test"+str(angle)+".jpg", new_img)
                new_img_vmirror = cv2.flip(new_img, 1)
                skimage.io.imsave("testv"+str(angle)+".jpg", new_img_vmirror)
                # img_rmirror = cv2.flip(new_img, 0)
                # skimage.io.imsave("testh"+str(angle)+".jpg", img_rmirror)
Project: CVtools    Author: Tyler-D
def load_and_augmentate(self, root):
        angles = [45, 90, 135, 180, 225, 270, 315]
        scale = 1.0
        for img_dir in os.listdir(root):
            img_dir_path = os.path.join(root, img_dir)
            for img in os.listdir(img_dir_path):
                img_path = os.path.join(img_dir_path, img)
                image = caffe.io.load_image(img_path,color=True)
                w = image.shape[1]
                h = image.shape[0]
                img_name = img.split(".")[0]
                img_type = img.split(".")[-1]
                img_vmirror = cv2.flip(image,1)
                img_vmirror_path = os.path.join(img_dir_path,img_name+"_v."+img_type)
                skimage.io.imsave(img_vmirror_path, img_vmirror )
                for angle in angles:
                    rot_mat = cv2.getRotationMatrix2D((w*0.5, h*0.5), angle, scale)
                    new_img = cv2.warpAffine(image, rot_mat, (int(math.ceil(w)), int(math.ceil(h))), flags=cv2.INTER_LANCZOS4)
                    new_img_path = os.path.join(img_dir_path,img_name+"_"+str(angle)+"."+img_type)
                    skimage.io.imsave(new_img_path, new_img)
                    new_img_vmirror = cv2.flip(new_img, 1)
                    new_img_vmirror_path = os.path.join(img_dir_path, img_name+"_"+str(angle)+"_v."+img_type)
                    skimage.io.imsave(new_img_vmirror_path, new_img_vmirror)
Project: dlcv_for_beginners    Author: frombeijingwithlove
def rotate_image(img, angle, crop):
    h, w = img.shape[:2]
    angle %= 360
    M_rotate = cv2.getRotationMatrix2D((w/2, h/2), angle, 1)
    img_rotated = cv2.warpAffine(img, M_rotate, (w, h))

    if crop:
        angle_crop = angle % 180
        if angle_crop > 90:
            angle_crop = 180 - angle_crop
        theta = angle_crop * np.pi / 180.0
        hw_ratio = float(h) / float(w)
        tan_theta = np.tan(theta)
        numerator = np.cos(theta) + np.sin(theta) * tan_theta
        r = hw_ratio if h > w else 1 / hw_ratio
        denominator = r * tan_theta + 1
        crop_mult = numerator / denominator
        w_crop = int(round(crop_mult*w))
        h_crop = int(round(crop_mult*h))
        x0 = int((w-w_crop)/2)
        y0 = int((h-h_crop)/2)

        img_rotated = crop_image(img_rotated, x0, y0, w_crop, h_crop)

    return img_rotated
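A possible usage sketch (file name is a placeholder; the function also relies on a crop_image helper that is not shown in this excerpt). With crop=True the result is trimmed to the largest rectangle with the original aspect ratio that avoids the black border introduced by the rotation:

img = cv2.imread("photo.jpg")
rotated_full = rotate_image(img, 20, crop=False)    # full frame, black corners
rotated_inner = rotate_image(img, 20, crop=True)    # largest inner rectangle, no border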
Project: Simple-User-Input-Sculpture-Generation    Author: ClaireKincaid
def create_image_matrix(self, degrees=180):
        """
        This creates a 3d matrix of an image with rotations acting in the xy plane.
        This code is not yet integrated into the menu, but it works. It needs
        to be able to take user text input to create transformation matrices that 
        can act on any volume data.
        """

        width = self.matrix_size
        rows,cols = self.img_cp.shape   #Image cp is the compressed image. 
        v = np.zeros((width, width, width))


        for z in range(width):
            M = cv2.getRotationMatrix2D((cols/2, rows/2), z*degrees/width, 1)      #This finds the rotation matrix
            # NOTE: the original code referenced an undefined name `image`; self.img_cp (the compressed image) is assumed here.
            dyn_img = cv2.resize(self.img_cp, (int(np.cos(z/width)*width+10), width-z+10))        #Resizes the image throughout the z axis based on a mathematical function.

            v[z] += cv2.warpAffine(dyn_img, M, (cols, rows))                    #This applies the rotation matrix to the resized slice.

        v = np.lib.pad(v, ((1,1),(1,1),(1,1)), 'constant') #This pads the z axis with arrays of zeros so that a closed shape is produced by create_iso_surface.
        return v
Project: 3dstools    Author: ObsidianX
def _rotate_image(self, mat, angle, width, height):
        big = max(width, height)
        small = min(width, height)
        center = (big / 2.0) - (small / 2.0)

        trans = numpy.float32([[1, 0, 0], [0, 1, 0]])
        trans2 = numpy.float32([[1, 0, 0], [0, 1, 0]])

        if small == width:
            trans[0, 2] = center
            trans2[1, 2] = -center - 1
        else:
            trans[1, 2] = center
            trans2[0, 2] = -center - 1

        # first enlarge the image to a square, translating the pixels to the new center
        mat = cv2.warpAffine(mat, trans, (big, big))
        # then rotate on the new center
        rot = cv2.getRotationMatrix2D((big / 2, big / 2), angle, 1)
        mat = cv2.warpAffine(mat, rot, (big, big))
        # finally translate back to the start and resize to the new size
        return cv2.warpAffine(mat, trans2, (height, width))
Project: ppap_detect    Author: ashitani
def rotate_image(img_src, angle,scale ,crop=True):
    img_src,size_dest= pad_image(img_src,scale)

    size = tuple(np.array([img_src.shape[1], img_src.shape[0]]))
    org_h=size[1]
    org_w=size[0]

    src_r = np.sqrt((size[0]/2.0)**2+(size[1]/2.0)**2)
    org_angle =np.arctan(float(org_h)/org_w)

    dest_h = size_dest[0]
    dest_w = size_dest[1]

    center = tuple(np.array([img_src.shape[1] * 0.5, img_src.shape[0] * 0.5]))

    dsize= (dest_w,dest_h)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    img_rot = cv2.warpAffine(img_src, rotation_matrix, size, flags=cv2.INTER_CUBIC)

    if crop:
        x,y,w,h = cv2.boundingRect(img_rot[:,:,3])
        return img_rot[y:y+h, x:x+w,:]
    else:
        return img_rot
Project: ppap_detect    Author: ashitani
def rotate_image(img_src, angle,scale ):
    img_src,size_dest= pad_image(img_src,scale)

    size = tuple(np.array([img_src.shape[1], img_src.shape[0]]))
    org_h=size[1]
    org_w=size[0]

    src_r = np.sqrt((size[0]/2.0)**2+(size[1]/2.0)**2)
    org_angle =np.arctan(float(org_h)/org_w)

    dest_h = size_dest[0]
    dest_w = size_dest[1]

    center = tuple(np.array([img_src.shape[1] * 0.5, img_src.shape[0] * 0.5]))

    dsize= (dest_w,dest_h)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    img_rot = cv2.warpAffine(img_src, rotation_matrix, size, flags=cv2.INTER_CUBIC)

    x,y,w,h = cv2.boundingRect(img_rot[:,:,3])
    return img_rot[y:y+h, x:x+w,:]
Project: face2movie    Author: Stunkymonkey
def matrixPicture(face, eyes, height, width):
    """calculation of rotation and movement of the image"""
    center = tuple((face[0] + (face[2] / 2), face[1] + (face[3] / 2)))

    moveMatrix = np.float32([[1, 0, (width / 2) - center[0]],
                             [0, 1, (height / 2) - center[1]]])

    scale = float(min(height, width)) / float(face[2]) * facescale

    eye1 = tuple((eyes[0][0] + (eyes[0][2] / 2),
                  eyes[0][1] + (eyes[0][3] / 2)))
    eye2 = tuple((eyes[1][0] + (eyes[1][2] / 2),
                  eyes[1][1] + (eyes[1][3] / 2)))

    x = (float(eye2[0]) - float(eye1[0]))
    y = (float(eye2[1]) - float(eye1[1]))

    if x == 0:
        angle = 0
    else:
        angle = atan(y / x) * 180 / pi

    rotMatrix = cv2.getRotationMatrix2D(center, angle, scale)

    return moveMatrix, rotMatrix
Project: ssd_keras    Author: pierluigiferrari
def _scale(image, min=0.9, max=1.1):
    '''
    Scale the input image by a random factor picked from a uniform distribution
    over [min, max].

    Returns:
        The scaled image, the associated warp matrix, and the scaling value.
    '''

    rows,cols,ch = image.shape

    #Randomly select a scaling factor from the range passed.
    scale = np.random.uniform(min, max)

    M = cv2.getRotationMatrix2D((cols/2,rows/2), 0, scale)
    return cv2.warpAffine(image, M, (cols, rows)), M, scale
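For intuition, when the angle is 0 the returned matrix is a pure scaling about the given centre. A minimal sketch (not part of the project) showing the matrix contents:

M = cv2.getRotationMatrix2D((100, 50), 0, 1.2)
# approximately [[1.2, 0.0, -20.0],
#                [0.0, 1.2, -10.0]]
# i.e. scale by 1.2, with the translation chosen so that the point (100, 50) stays fixed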
Project: deepstacks    Author: guoxuesong
def random_rotate(w,h,angle,scale,*all_inputs):
    if type(angle)==float:
        angle=(-angle,angle)
    if type(scale)==float:
        scale=(1-scale,1+scale)
    cx=(np.random.rand(len(all_inputs[0])).astype(floatX))*w
    cy=(np.random.rand(len(all_inputs[0])).astype(floatX))*h
    actions=(np.random.rand(len(all_inputs[0]),4,1,1)).astype(floatX)
    actions2=np.zeros_like(actions)
    actions2[:,0]=(actions[:,0]*(angle[1]-angle[0])+angle[0]).astype(floatX)
    actions2[:,1]=(actions[:,1]*(scale[1]-scale[0])+scale[0]).astype(floatX)
    actions2[:,2,0,0]=cx
    actions2[:,3,0,0]=cy
    all_outputs=[]
    for inputs in all_inputs:
        outputs=np.zeros(inputs.shape,dtype=floatX)
        for i in range(len(inputs)):
            mat = cv2.getRotationMatrix2D((cx[i],cy[i]),actions2[i,0,0,0],actions2[i,1,0,0])
            tmp = cv2.warpAffine(inputs[i].transpose(1,2,0),mat,inputs[i].shape[1:]).transpose(2,0,1)
            #tmp=np.pad(inputs[i:i+1],((0,0),(0,0),(n,n),(n,n)),mode='constant',constant_values=0)
            #tmp=np.roll(tmp,actions2[i,0,0,0],2)
            #tmp=np.roll(tmp,actions2[i,1,0,0],3)
            outputs[i]=tmp
        all_outputs+=[outputs]
    return all_outputs+[actions2.reshape(len(inputs),4)]
Project: sceneReco    Author: yijiuzai
def dumpRotateImage(img,degree,pt1,pt2,pt3,pt4):
    height,width=img.shape[:2]
    heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
    widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
    matRotation=cv2.getRotationMatrix2D((width/2,height/2),degree,1)
    matRotation[0, 2] += (widthNew - width) / 2
    matRotation[1, 2] += (heightNew - height) / 2
    imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
    pt1 = list(pt1)
    pt3 = list(pt3)


    [[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
    [[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
    imgOut=imgRotation[int(pt1[1]):int(pt3[1]),int(pt1[0]):int(pt3[0])]
    height,width=imgOut.shape[:2]
    return imgOut
Project: kaggle_ndsb2017    Author: juliandewit
def random_rotate_img(img, chance, min_angle, max_angle):
    import cv2
    if random.random() > chance:
        return img
    if not isinstance(img, list):
        img = [img]

    angle = random.randint(min_angle, max_angle)
    center = (img[0].shape[0] / 2, img[0].shape[1] / 2)
    rot_matrix = cv2.getRotationMatrix2D(center, angle, scale=1.0)

    res = []
    for img_inst in img:
        img_inst = cv2.warpAffine(img_inst, rot_matrix, dsize=img_inst.shape[:2], borderMode=cv2.BORDER_CONSTANT)
        res.append(img_inst)
    if len(res) == 1:
        res = res[0]  # unwrap the single image when the input was not a list
    return res
Project: Robo-Plot    Author: JackBuck
def create_rotated_sub_image(image, centre, search_width, angle_rad):
    # Rotation transform requires x then y.
    M = cv2.getRotationMatrix2D((centre[1], centre[0]), np.rad2deg(angle_rad), 1.0)

    w = image.shape[1]
    h = centre[0] + int((image.shape[0] - centre[0]) * abs(math.sin(angle_rad)))
    rotated = cv2.warpAffine(image, M, (w, h))

    # Centre the last white centroid into the centre of the image.
    half_sub_image_width = int(min(min(search_width, centre[1]),
                                   min(rotated.shape[1] - centre[1], search_width)))

    sub_image = rotated[centre[0]:,
                centre[1] - half_sub_image_width: centre[1] + half_sub_image_width]

    return sub_image
Project: Notes2ppt    Author: gsengupta2810
def rotate_bound(image, angle):
    # grab the dimensions of the image and then determine the
    # center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))
Project: pose_estimation    Author: JakeRenn
def _random_roate(self, images, labels, degree):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        degree = degree * math.pi / 180
        rand_degree = np.random.uniform(-degree, degree, images.shape[0])

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)
        for idx in xrange(images.shape[0]):
            theta = rand_degree[idx]

            # labels
            for ii in xrange(self.points_num):
                o_labels[idx, 2*ii: 2*ii+2] = self._rotate(labels[idx, ii*2: 2*ii+2], theta)

            # image
            M = cv2.getRotationMatrix2D((self.img_width/2,self.img_height/2),-theta*180/math.pi,1)
            o_images[idx] = np.expand_dims(cv2.warpAffine(images[idx],M,(self.img_width,self.img_height)), axis=2)

        return o_images, o_labels
Project: pose_estimation    Author: JakeRenn
def _batch_random_roate(self, images, labels, degree):
        if(images.shape[0] != labels.shape[0]):
            raise Exception("Batch size Error.")
        degree = degree * math.pi / 180
        rand_degree = np.random.uniform(-degree, degree)

        o_images = np.zeros_like(images)
        o_labels = np.zeros_like(labels)
        for idx in xrange(images.shape[0]):
            theta = rand_degree

            # labels
            for ii in xrange(self.points_num):
                o_labels[idx, 2*ii: 2*ii+2] = self._rotate(labels[idx, ii*2: 2*ii+2], theta)

            # image
            M = cv2.getRotationMatrix2D((self.img_width/2,self.img_height/2),-theta*180/math.pi,1)
            o_images[idx] = np.expand_dims(cv2.warpAffine(images[idx],M,(self.img_width,self.img_height)), axis=2)

        return o_images, o_labels
Project: seglink    Author: dengdan
def rotate_oriented_bbox_to_horizontal(center, bbox):
    """
    Step 2 of Figure 5 in seglink paper

    Rotate bbox horizontally along a `center` point
    Args:
        center: the center of rotation
        bbox: [cx, cy, w, h, theta]
    """
    assert np.shape(center) == (2, ), "center must be a vector of length 2"
    assert np.shape(bbox) == (5, ) or np.shape(bbox) == (4, ), "bbox must be a vector of length 4 or 5"
    bbox = np.asarray(bbox.copy(), dtype = np.float32)

    cx, cy, w, h, theta = bbox;
    M = cv2.getRotationMatrix2D(center, theta, scale = 1) # 2x3

    cx, cy = np.dot(M, np.transpose([cx, cy, 1]))

    bbox[0:2] = [cx, cy] 
    return bbox
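The same 2x3 matrix can also be applied to many points at once with cv2.transform, which expects an array of shape (N, 1, 2); a small sketch (not part of seglink) equivalent to the per-point np.dot above:

pts = np.array([[[10.0, 20.0]], [[30.0, 40.0]]], dtype=np.float32)   # shape (N, 1, 2)
M = cv2.getRotationMatrix2D((50, 50), 15, 1.0)
rotated_pts = cv2.transform(pts, M)   # each point mapped as np.dot(M, [x, y, 1])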
Project: seglink    Author: dengdan
def rotate_horizontal_bbox_to_oriented(center, bbox):
    """
    Step 4 of Figure 5 in seglink paper: 
        Rotate the cropped horizontal bbox back to its original direction
    Args:
        center: the center of rotation
        bbox: [cx, cy, w, h, theta]
    Return: the oriented bbox
    """
    assert np.shape(center) == (2, ), "center must be a vector of length 2"
    assert np.shape(bbox) == (5, ), "bbox must be a vector of length 5"
    bbox = np.asarray(bbox.copy(), dtype = np.float32)

    cx, cy, w, h, theta = bbox;
    M = cv2.getRotationMatrix2D(center, -theta, scale = 1) # 2x3
    cx, cy = np.dot(M, np.transpose([cx, cy, 1]))
    bbox[0:2] = [cx, cy]
    return bbox
Project: PlantRecognitionWebAPI    Author: ConorPai
def rotate_about_center(src, angle, scale=1.):
    if angle == 0:
        return src

    w = src.shape[1]
    h = src.shape[0]
    rangle = np.deg2rad(angle)  # angle in radians
    # now calculate new image width and height
    nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale
    nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale
    # ask OpenCV for the rotation matrix
    rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
    # calculate the move from the old center to the new center combined
    # with the rotation
    rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
    # the move only affects the translation, so update the translation
    # part of the transform
    rot_mat[0,2] += rot_move[0]
    rot_mat[1,2] += rot_move[1]
    return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)

Project: lung-cancer-detector    Author: YichenGong
def rotate_2d(img, angle_in_degrees,random_mode=True, probability=0.5):
    if random_mode:
        if random.random() < probability:
            return img

    rot_mat = cv.getRotationMatrix2D(tuple(np.array(img.shape) / 2), angle_in_degrees, 1.0)  # fixed: `angle` was undefined; use the angle_in_degrees parameter
    return apply_affine(img, rot_mat)
Project: Millennium-Eye    Author: Elysium1937
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5)
    dst = cv2.warpAffine(blank_image,M,(cols,rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2

    return close_height_px, far_height_px
Project: WashingMachine    Author: syangav
def rotate_bound(image, angle):
    # grab the dimensions of the image and then determine the
    # center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))
Project: sail    Author: GemHunt
def create_design_data_set(labeled_designs,design_crop_dir,image_dir,test):
    labels = ['heads','tails']
    if test:
        pixels_to_jitter = 0
        angles = 1
    else:
        pixels_to_jitter = 2
        angles = 100

    for label in labels:
        dir = image_dir + label + '/'
        if not os.path.exists(dir):
            os.makedirs(dir)

    for coin_id, label in labeled_designs.iteritems():
        before_rotate_size = 56
        for image_id in range(0,56):
            #dir = design_crop_dir + str(coin_id / 100) + '/'
            class_dir = image_dir + label + '/'
            #for angle in range(0,10):
            filename = str(coin_id).zfill(5) + str(image_id).zfill(2) + '.png'
            image = cv2.imread(design_crop_dir + filename)
            image = cv2.resize(image, (before_rotate_size, before_rotate_size), interpolation=cv2.INTER_AREA)
            for count in range(0,angles):
                angle = random.random() * 360
                center_x = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter
                center_y = before_rotate_size / 2 + (random.random() * pixels_to_jitter * 2) - pixels_to_jitter
                rot_image = image.copy()
                m = cv2.getRotationMatrix2D((center_x, center_y), angle, 1)
                cv2.warpAffine(rot_image, m, (before_rotate_size, before_rotate_size), rot_image, cv2.INTER_CUBIC)
                # This is hard coded for 28x28.
                rot_image = cv2.resize(rot_image, (41, 41), interpolation=cv2.INTER_AREA)
                rot_image = rot_image[6:34, 6:34]
                rotated_filename = filename.replace('.png', str(count).zfill(2) + '.png')
                cv2.imwrite(class_dir + rotated_filename,rot_image)
    sys.exit()
Project: sail    Author: GemHunt
def get_rotated_crop(crop_dir, crop_id, crop_size, angle):
    filename = get_filename_from(crop_id,crop_dir)
    crop = cv2.imread(filename)
    if crop is None:
        print crop_id, 'None'
        return None

    crop = cv2.resize(crop, (crop_size, crop_size), interpolation=cv2.INTER_AREA)
    if angle is None:
        angle = 0

    print crop_dir, crop_id, crop_size, angle
    m = cv2.getRotationMatrix2D((crop_size / 2, crop_size / 2), angle, 1)
    cv2.warpAffine(crop, m, (crop_size, crop_size), crop, cv2.INTER_CUBIC)
    return crop
Project: watermark    Author: lishuaijuly
def rotate_about_center(src, angle, scale=1.):
    w = src.shape[1]
    h = src.shape[0]
    rangle = np.deg2rad(angle)  # angle in radians
    nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale
    nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale
    rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
    rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
    rot_mat[0,2] += rot_move[0]
    rot_mat[1,2] += rot_move[1]
    return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
Project: DeepTextSpotter    Author: MichalBusta
def get_normalized_image(img, rr, debug = False):

  box = cv2.boxPoints(rr)
  extbox = cv2.boundingRect(box)

  if extbox[2] *  extbox[3] > img.shape[0] * img.shape[1]:
    print("Too big proposal: {0}x{1}".format(extbox[2], extbox[3]))
    return None, None
  extbox = [extbox[0], extbox[1], extbox[2], extbox[3]]
  extbox[2] += extbox[0]
  extbox[3] += extbox[1]
  extbox = np.array(extbox, np.int)

  extbox[0] = max(0, extbox[0])
  extbox[1] = max(0, extbox[1])
  extbox[2] = min(img.shape[1], extbox[2])
  extbox[3] = min(img.shape[0], extbox[3])

  tmp = img[extbox[1]:extbox[3], extbox[0]:extbox[2]]
  center = (tmp.shape[1] / 2,  tmp.shape[0] / 2)
  rot_mat = cv2.getRotationMatrix2D( center, rr[2], 1 )

  if tmp.shape[0] == 0 or tmp.shape[1] == 0:
    return None, rot_mat

  if debug:
    vis.draw_box_points(img,  np.array(extbox, dtype="int"), color = (0, 255, 0))
    cv2.imshow('scaled', img)

  rot_mat[0,2] += rr[1][0] /2.0 - center[0]
  rot_mat[1,2] += rr[1][1] /2.0 - center[1]
  try:
    norm_line = cv2.warpAffine( tmp, rot_mat, (int(rr[1][0]), int(rr[1][1])), borderMode=cv2.BORDER_REPLICATE )
  except:
    return None, rot_mat
  return norm_line, rot_mat
Project: acdc_segmenter    Author: baumgach
def rotate_image(img, angle, interp=cv2.INTER_LINEAR):

        rows, cols = img.shape[:2]
        rotation_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
        return cv2.warpAffine(img, rotation_matrix, (cols, rows), flags=interp)
Project: face    Author: MOluwole
def DetectEyes(Image):
    Theta = 0
    rows, cols = Image.shape
    glass = glass_cas.detectMultiScale(Image)                                               # This ditects the eyes
    for (sx, sy, sw, sh) in glass:
        if glass.shape[0] == 2:                                                             # The Image should have 2 eyes
            if glass[1][0] > glass[0][0]:
                DY = ((glass[1][1] + glass[1][3] / 2) - (glass[0][1] + glass[0][3] / 2))    # Height difference between the glass
                DX = ((glass[1][0] + glass[1][2] / 2) - glass[0][0] + (glass[0][2] / 2))    # Width difference between the glass
            else:
                DY = (-(glass[1][1] + glass[1][3] / 2) + (glass[0][1] + glass[0][3] / 2))   # Height difference between the glass
                DX = (-(glass[1][0] + glass[1][2] / 2) + glass[0][0] + (glass[0][2] / 2))   # Width difference between the glass

            if (DX != 0.0) and (DY != 0.0):                                                 # Make sure the the change happens only if there is an angle
                Theta = math.degrees(math.atan(round(float(DY) / float(DX), 2)))            # Find the Angle
                print "Theta  " + str(Theta)

                M = cv2.getRotationMatrix2D((cols / 2, rows / 2), Theta, 1)                 # Find the Rotation Matrix
                Image = cv2.warpAffine(Image, M, (cols, rows))
                # cv2.imshow('ROTATED', Image)                                              # UNCOMMENT IF YOU WANT TO SEE THE

                Face2 = face.detectMultiScale(Image, 1.3, 5)                                # This detects a face in the image
                for (FaceX, FaceY, FaceWidth, FaceHeight) in Face2:
                    CroppedFace = Image[FaceY: FaceY + FaceHeight, FaceX: FaceX + FaceWidth]
                    return CroppedFace
Project: Face-Recognition    Author: irmowan
def im_rotate(im, landmark):
    """Rotate the image according to the angle of two eyes.

    Args:
        landmark: 5 points, left_eye, right_eye, nose, leftmouth, right_mouth
        im: image matrix

    Returns:
        A rotated image matrix.
        Rotated angle.
        Rotated landmark points.
    """
    ang = math.atan2(landmark[3] - landmark[1], landmark[2] - landmark[0])
    angle = ang / math.pi * 180
    center = tuple(np.array((im.shape[1] / 2.0, im.shape[0] / 2.0)))
    scale = 1.0

    rot_mat = cv2.getRotationMatrix2D(center, angle, scale)
    dst = cv2.warpAffine(im, rot_mat, (im.shape[1], im.shape[0]))
    # rotate 5 landmark points
    left_eye = point_trans(landmark[0:2], -ang, im.shape, im.shape)
    right_eye = point_trans(landmark[2:4], -ang, im.shape, im.shape)
    nose = point_trans(landmark[4:6], -ang, im.shape, im.shape)
    left_mouth = point_trans(landmark[6:8], -ang, im.shape, im.shape)
    right_mouth = point_trans(landmark[8:10], -ang, im.shape, im.shape)
    n_landmark = np.concatenate([left_eye, right_eye, nose, left_mouth, right_mouth])
    return dst, ang, n_landmark
Project: tf-cnn-lstm-ocr-captcha    Author: Luonic
def rotate(image, angle, center = None, scale = 1.0):
    (h, w) = image.shape[:2]

    if center is None:
        center = (w / 2, h / 2)

    # Perform the rotation
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))

    return rotated
Project: sceneReco    Author: bear63
def dumpRotateImage(img,degree,pt1,pt2,pt3,pt4):
    height,width=img.shape[:2]
    heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
    widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
    matRotation=cv2.getRotationMatrix2D((width/2,height/2),degree,1)
    matRotation[0, 2] += (widthNew - width) / 2
    matRotation[1, 2] += (heightNew - height) / 2
    imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
    pt1 = list(pt1)
    pt3 = list(pt3)


    [[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
    [[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
    imgOut=imgRotation[int(pt1[1]):int(pt3[1]),int(pt1[0]):int(pt3[0])]
    height,width=imgOut.shape[:2]
    return imgOut
Project: tensorflow_face    Author: ZhihengCV
def rotateImage(image, angle, center=None, scale=1.0):
    h, w = image.shape[:2]
    if center is None:
        center = (w / 2, h / 2)
    rot_mat = cv2.getRotationMatrix2D(center, angle, scale)
    result = cv2.warpAffine(image, rot_mat, (w, h), flags=cv2.INTER_CUBIC)
    return result
Project: serbian-alpr    Author: golubaca
def rotate_image(self, image, angle):
        image_center = tuple(np.array(image.shape) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
        result = cv2.warpAffine(
            image,
            rot_mat,
            image.shape,
            flags=cv2.INTER_LINEAR)
        return result
Project: KAGGLE_CERVICAL_CANCER_2017    Author: ZFTurbo
def random_rotate(image):
    cols = image.shape[1]
    rows = image.shape[0]
    mean_color = np.mean(image, axis=(0, 1))

    angle = random.uniform(0, 90)
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    if random.randint(0, 1) == 1:
        dst = cv2.warpAffine(image, M, (cols, rows), borderValue=mean_color, borderMode=cv2.BORDER_REFLECT)
    else:
        dst = cv2.warpAffine(image, M, (cols, rows), borderValue=mean_color)
    return dst
Project: luna16    Author: gzuidhof
def augment_with_params(self, Xb, shift_x, shift_y, rotation, random_flip, zoom, hue, saturation, value):


            # Define affine matrix
            # TODO: Should be able to incorporate flips directly instead of through an extra call
            M = cv2.getRotationMatrix2D((self.center_shift[0], self.center_shift[1]), rotation, zoom)
            M[0, 2] += shift_x
            M[1, 2] += shift_y

            augment_partial = partial(augment_image,
                                        M=M,
                                        random_flip=random_flip,
                                        random_hue=hue,
                                        random_saturation=saturation,
                                        random_value=value)

            if self.multiprocess:
                l = self.pool.map(augment_partial, Xb)
                Xbb = np.array(l)
            else:
                Xbb = np.zeros(Xb.shape, dtype=np.float32)
                for i in xrange(Xb.shape[0]):
                    Xbb[i] = augment_partial(Xb[i])


            return Xbb

# Augments a single image, singled out for easier profiling
Project: doc2text    Author: jlsutherland
def rotate(image, theta):
    (h, w) = image.shape[:2]
    center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, theta, 1)
    rotated = cv2.warpAffine(image, M, (int(w), int(h)), cv2.INTER_LINEAR,
                             borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
    return rotated
Project: tf-openpose    Author: ildoonet
def pose_rotation(meta):
    deg = random.uniform(-40.0, 40.0)
    img = meta.img

    center = (img.shape[1] * 0.5, img.shape[0] * 0.5)
    rot_m = cv2.getRotationMatrix2D((center[0] - 0.5, center[1] - 0.5), deg, 1)
    ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
    if img.ndim == 3 and ret.ndim == 2:
        ret = ret[:, :, np.newaxis]
    neww, newh = RotationAndCropValid.largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
    neww = min(neww, ret.shape[1])
    newh = min(newh, ret.shape[0])
    newx = int(center[0] - neww * 0.5)
    newy = int(center[1] - newh * 0.5)
    # print(ret.shape, deg, newx, newy, neww, newh)
    img = ret[newy:newy + newh, newx:newx + neww]

    # adjust meta data
    adjust_joint_list = []
    for joint in meta.joint_list:
        adjust_joint = []
        for point in joint:
            if point[0] < -100 or point[1] < -100:
                adjust_joint.append((-1000, -1000))
                continue
            # if point[0] <= 0 or point[1] <= 0:
            #     adjust_joint.append((-1, -1))
            #     continue
            x, y = _rotate_coord((meta.width, meta.height), (newx, newy), point, deg)
            adjust_joint.append((x, y))
        adjust_joint_list.append(adjust_joint)

    meta.joint_list = adjust_joint_list
    meta.width, meta.height = neww, newh
    meta.img = img

    return meta
Project: idmatch    Author: maddevsio
def rotate(image, angle, center = None, scale = 1.0):
    # Grab the dimensions of the image
    (h, w) = image.shape[:2]
    # If the center is None, initialize it as the center of
    # the image
    if center is None:
        center = (w / 2, h / 2)
    # Perform the rotation
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    # Return the rotated image
    return rotated
Project: Deep-Leafsnap    Author: sujithv28
def rotate(image, angle):
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    return cv2.warpAffine(image, M, (nW, nH))
Project: imgProcessor    Author: radjkarl
def rotate(image, angle, interpolation=cv2.INTER_CUBIC,
           borderMode=cv2.BORDER_REFLECT, borderValue=0):
    '''
    angle [deg]
    '''
    s0, s1 = image.shape
    image_center = (s0 - 1) / 2., (s1 - 1) / 2.
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape,
                            flags=interpolation, borderMode=borderMode,
                            borderValue=borderValue)
    return result
Project: imgProcessor    Author: radjkarl
def _rotate(img, angle):
        '''
        angle [DEG]
        '''
        s = img.shape
        if angle == 0:
            return img
        else:
            M = cv2.getRotationMatrix2D((s[1] // 2,
                                         s[0] // 2), angle, 1)
            return cv2.warpAffine(img, M, (s[1], s[0]))
Project: HandwritingRecognition    Author: eng-tsmith
def skew(img):
    """
    This function detects skew in images. It turns the image so that the baseline of the image is straight.
    :param img: the image
    :return: rotated image
    """
    # coordinates of the bottom-most foreground (white, value 255) pixel in every column
    black_pix = np.zeros((2, 1))

    # Scan the image column-wise, from the bottom pixel to the top, and store the location of the first
    # foreground (white) pixel found in every column
    for columns in range(img.shape[1]):
        for pixel in np.arange(img.shape[0]-1, -1, -1):
            if img[pixel][columns] == 255:
                black_pix = np.concatenate((black_pix, np.array([[pixel], [columns]])), axis=1)
                break

    # Calculate linear regression to detect baseline
    mean_x = np.mean(black_pix[1][:])
    mean_y = np.mean(black_pix[0][:])
    k = black_pix.shape[1]
    a = (np.sum(black_pix[1][:] * black_pix[0][:]) - k * mean_x * mean_y) / (np.sum(black_pix[1][:] * black_pix[1][:]) - k * mean_x * mean_x)

    # Calculate angle by looking at gradient of linear function + data augmentation
    angle = np.arctan(a) * 180 / np.pi #+ random.uniform(-1, 1) #TODO dataug

    # Rotate image and use Nearest Neighbour for interpolation of pixel
    rows, cols = img.shape
    M = cv.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    img_rot = cv.warpAffine(img, M, (cols, rows), flags=cv.INTER_NEAREST)

    return img_rot
Project: HandwritingRecognition    Author: eng-tsmith
def skew(img):
    """
    This function detects skew in images. It turns the image so that the baseline of the image is straight.
    :param img: the image
    :return: rotated image
    """
    # coordinates of the bottom-most foreground (white, value 255) pixel in every column
    black_pix = np.zeros((2, 1))

    # Scan the image column-wise, from the bottom pixel to the top, and store the location of the first
    # foreground (white) pixel found in every column
    for columns in range(img.shape[1]):
        for pixel in np.arange(img.shape[0]-1, -1, -1):
            if img[pixel][columns] == 255:
                black_pix = np.concatenate((black_pix, np.array([[pixel], [columns]])), axis=1)
                break

    # Calculate linear regression to detect baseline
    mean_x = np.mean(black_pix[1][:])
    mean_y = np.mean(black_pix[0][:])
    k = black_pix.shape[1]
    a = (np.sum(black_pix[1][:] * black_pix[0][:]) - k * mean_x * mean_y) / (np.sum(black_pix[1][:] * black_pix[1][:]) - k * mean_x * mean_x)

    # Calculate angle by looking at gradient of linear function + data augmentation
    angle = np.arctan(a) * 180 / np.pi + random.uniform(-0.5, 0.5) #TODO dataug

    # Rotate image and use Nearest Neighbour for interpolation of pixel
    rows, cols = img.shape
    M = cv.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    img_rot = cv.warpAffine(img, M, (cols, rows), flags=cv.INTER_NEAREST)

    return img_rot
Project: cnn-traffic-light-evaluation    Author: takeitallsource
def rotate(image, angles=DEFAULT_ANGLES):
    rotated = []

    for angle in angles:
        center = tuple(np.array(image.shape[:2]) / 2)
        R = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated_image = cv2.warpAffine(image, R, image.shape[:2], flags=cv2.INTER_CUBIC)
        rotated.append(rotated_image)

    return rotated
Project: image_servers    Author: takiyu
def rotateImg(img, deg):
    h, w = img.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2, h / 2), deg, 1.0)
    rotated_img = cv2.warpAffine(img, M, (w, h))
    return rotated_img
Project: eva-didi    Author: eljefec
def rotate_image(img, radians):
    (rows, cols, channels) = img.shape
    degrees = math.degrees(radians)
    M = cv2.getRotationMatrix2D((cols/2, rows/2), degrees, 1)
    return cv2.warpAffine(img, M, (cols, rows))