Python Image module, BILINEAR example source code

The following 17 code examples, extracted from open-source Python projects, illustrate how to use Image.BILINEAR.
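Before the individual project snippets, here is a minimal self-contained sketch (the file names are placeholders) showing the most common way Image.BILINEAR is used: as the resampling filter passed to resize() or transform().

from PIL import Image

# Open any image; "example.jpg" is only a placeholder path.
im = Image.open("example.jpg")

# Bilinear resampling interpolates linearly between the four nearest source
# pixels, a reasonable default for moderate scaling.
small = im.resize((128, 128), Image.BILINEAR)

# The same constant selects the filter for affine warps; the 6-tuple maps each
# output pixel (x, y) back to the source location (x + 10, y + 20).
shifted = im.transform(im.size, Image.AFFINE, (1, 0, 10, 0, 1, 20), Image.BILINEAR)
small.save("example_small.jpg")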

Project: nimo    Author: wolfram2012    | Project source | File source
def AffineFromRect(rect,new_size,filter=BILINEAR):
    ''' 
    Create a transform from a source rectangle to a new image.  This basically 
    crops a rectangle out of the image and rescales it to the new size.

    @param rect: the source Rect.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    w,h = new_size

    x_scale = float(w)/rect.w
    y_scale = float(h)/rect.h
    x_trans = -rect.x*x_scale
    y_trans = -rect.y*y_scale
    matrix = array([[x_scale,0,x_trans],[0,y_scale,y_trans],[0,0,1]],'d')

    return AffineTransform(matrix,new_size,filter)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineTranslate(dx,dy,new_size,filter=BILINEAR):
    '''
    Create a simple translation transform

    @param dx: translation in the x direction
    @param dy: translation in the y direction
    @param new_size: new size for the image
    @param filter: PIL filter to use    
    '''
    matrix = array([[1,0,dx],[0,1,dy],[0,0,1]],'d')

    return AffineTransform(matrix,new_size,filter)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineScale(scale,new_size,center=None,filter=BILINEAR):
    '''
    Create a simple scale transform.

    @param scale: the amount to scale the image.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    matrix = array([[scale,0,0],[0,scale,0],[0,0,1]],'d')

    scale = AffineTransform(matrix,new_size,filter)
    if center is None:
        return scale
    else:
        return AffineTranslate(center.X(),center.Y(),new_size)*scale*AffineTranslate(-center.X(),-center.Y(),new_size)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineNonUniformScale(sx,sy,new_size,filter=BILINEAR):
    '''
    Create a scale transform with different values for the x and y directions.

    @param sx: scale in the x direction.
    @param sy: scale in the y direction.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    matrix = array([[sx,0,0],[0,sy,0],[0,0,1]],'d')

    return AffineTransform(matrix,new_size,filter)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineRotate(theta,new_size,center=None,filter=BILINEAR):
    '''
    Create a rotation about the origin.

    @param theta: the angle to rotate the image in radians.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    matrix = array([[math.cos(theta),-math.sin(theta),0],[math.sin(theta),math.cos(theta),0],[0,0,1]],'d')

    rotate = AffineTransform(matrix,new_size,filter)
    if center is None:
        return rotate
    else:
        return AffineTranslate(center.X(),center.Y(),new_size)*rotate*AffineTranslate(-center.X(),-center.Y(),new_size)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineFromPoints(src1,src2,dst1,dst2,new_size,filter=BILINEAR):
    ''' 
    An affine transform that will rotate, translate, and scale to map one 
    set of points to the other. For example, to align eye coordinates in face images.

    Find a transform (a,b,tx,ty) such that it maps the source points to the 
    destination points::

        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2

    The mapping between the two points creates a set of  four linear equations 
    with four unknowns. This set of equations is solved to find the transform.

    @param src1: the first Point in the source image.
    @param src2: the second Point in the source image.
    @param dst1: the first Point in the destination image.
    @param dst2: the second Point in the destination image.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''

    # Compute the transformation parameters
    A = [[src1.X(),-src1.Y(),1,0],
         [src1.Y(),src1.X(),0,1],
         [src2.X(),-src2.Y(),1,0],
         [src2.Y(),src2.X(),0,1]]
    b = [dst1.X(),dst1.Y(),dst2.X(),dst2.Y()]
    A = array(A)
    b = array(b)
    result = solve(A,b)

    a,b,tx,ty = result    
    # Create the transform matrix
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')

    return AffineTransform(matrix,new_size,filter)
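A hedged usage sketch for the factory above, assuming a pyvision-style API in which Point objects provide the X()/Y() accessors used in these snippets and the returned AffineTransform exposes a transformImage method; the coordinates and the face_image variable are illustrative only. It aligns a face so the detected eye points land at fixed positions in a 128x128 crop.

# Sketch only: Point, BILINEAR, AffineFromPoints and transformImage are assumed
# to come from the same pyvision-like module as the code above.
left_eye  = Point(120, 140)          # detected in the source image
right_eye = Point(200, 138)
dst_left  = Point(40, 60)            # canonical eye positions in the output crop
dst_right = Point(88, 60)

affine = AffineFromPoints(left_eye, right_eye, dst_left, dst_right,
                          (128, 128), filter=BILINEAR)
aligned = affine.transformImage(face_image)   # face_image: an already-loaded image object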
Project: nimo    Author: wolfram2012    | Project source | File source
def __init__(self,matrix,new_size,filter=BILINEAR):
        '''
        Constructor for the AffineTransform.  See also the affine transform factories.

        @param matrix: a 3-by-3 matrix that defines the transformation.
        @param new_size: the size of any new images created by this affine transform.
        @param filter: the image filtering function used for interpolating between pixels.
        @returns: an AffineTransform object
        '''
        self.matrix = matrix
        self.inverse = inv(matrix)
        self.size = int(new_size[0]),int(new_size[1])
        self.filter = filter
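The constructor above precomputes self.inverse because PIL's Image.transform with Image.AFFINE expects the inverse mapping, i.e. coefficients that take each output pixel back to a source location. A hedged standalone sketch of that application step (apply_affine is an illustrative name, not a method from the snippet):

import numpy as np
from PIL import Image

def apply_affine(pil_img, forward_matrix, size, resample=Image.BILINEAR):
    # Invert the forward 3x3 matrix and hand PIL the first two rows as the
    # flat 6-tuple (a, b, c, d, e, f) it expects for Image.AFFINE.
    inverse = np.linalg.inv(forward_matrix)
    a, b, c = inverse[0]
    d, e, f = inverse[1]
    return pil_img.transform(size, Image.AFFINE, (a, b, c, d, e, f), resample)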
Project: mlstudy_week7    Author: ByungKeon-Ko    | Project source | File source
def save_patch( img, annot, path, net_name ) :
    left    = int( annot[0] )
    btm     = int( annot[1] )
    width   = int( annot[2] )
    height  = int( annot[3] )
    tmp_img = img.crop( (left, btm, left+width, btm+height) )
    if net_name == '12net' :
        tmp_img.resize( (12, 12), Image.BILINEAR ).save(path)
    elif net_name == '24net' :
        tmp_img.resize( (24, 24), Image.BILINEAR ).save(path)
    elif net_name == '48net' :
        tmp_img.resize( (48, 48), Image.BILINEAR ).save(path)
    return 1
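A hedged usage sketch (image path and annotation values are placeholders): annot is read as [left, top, width, height], despite the btm variable name, because PIL's crop box is (left, upper, right, lower).

from PIL import Image

img = Image.open("face_photo.jpg").convert("RGB")    # placeholder path
annot = [35, 50, 96, 96]                             # [left, top, width, height]
save_patch(img, annot, "patches/ns-0.jpg", "12net")  # writes a 12x12 bilinear-resized crop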
Project: text-renderer    Author: cjnolet    | Project source | File source
def resize_image(im, r=None, newh=None, neww=None, filtering=Image.BILINEAR):
    dt = im.dtype
    I = Image.fromarray(im)
    if r is not None:
        h = im.shape[0]
        w = im.shape[1]
        newh = int(round(r*h))
        neww = int(round(r*w))
    if neww is None:
        neww = int(newh*im.shape[1]/float(im.shape[0]))
    if newh > im.shape[0]:
        I = I.resize([neww, newh], Image.ANTIALIAS)
    else:
        I.thumbnail([neww, newh], filtering)
    return n.array(I).astype(dt)
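A hedged usage sketch, assuming the module's n alias is numpy (the function returns n.array(I)); the input path is a placeholder. Note that when shrinking, the function goes through Image.thumbnail, which preserves the aspect ratio rather than forcing an exact (neww, newh).

import numpy as np
from PIL import Image

arr = np.asarray(Image.open("page.png").convert("L"))   # placeholder grayscale input
half = resize_image(arr, r=0.5)                          # half-size, bilinear, dtype preserved
fixed_height = resize_image(arr, newh=64)                # width derived from the aspect ratio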
Project: CNCGToolKit    Author: cineuse    | Project source | File source
def deform(image, deformer, resample=Image.BILINEAR):
    "Deform image using the given deformer"
    return image.transform(
        image.size, Image.MESH, deformer.getmesh(image), resample
        )

##
# Equalize the image histogram.  This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask.  If given, only the pixels selected by
#     the mask are included in the analysis.
# @return An image.
Project: InstagramPosting    Author: LeviParadis    | Project source | File source
def deform(image, deformer, resample=Image.BILINEAR):
    "Deform image using the given deformer"
    return image.transform(
        image.size, Image.MESH, deformer.getmesh(image), resample
        )

##
# Equalize the image histogram.  This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask.  If given, only the pixels selected by
#     the mask are included in the analysis.
# @return An image.
Project: ngx_status    Author: YoYoAdorkable    | Project source | File source
def deform(image, deformer, resample=Image.BILINEAR):
    "Deform image using the given deformer"
    return image.transform(
        image.size, Image.MESH, deformer.getmesh(image), resample
        )

##
# Equalize the image histogram.  This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask.  If given, only the pixels selected by
#     the mask are included in the analysis.
# @return An image.
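The deform helper shown in the three snippets above expects a deformer object with a getmesh(image) method returning the data PIL's Image.MESH transform understands: a list of (target_box, source_quad) pairs, where the quad is eight numbers giving the NW, SW, SE and NE source corners. A hedged minimal example (the 10-pixel shear amount is arbitrary, and "photo.jpg" is a placeholder):

from PIL import Image

class ShearDeformer:
    # Minimal deformer: maps the whole image onto itself with a slight
    # horizontal shear by displacing the top corners of the source quad.
    def getmesh(self, image):
        w, h = image.size
        target_box = (0, 0, w, h)
        source_quad = (10, 0,  0, h,  w - 10, h,  w, 0)   # NW, SW, SE, NE
        return [(target_box, source_quad)]

im = Image.open("photo.jpg")
warped = deform(im, ShearDeformer())    # resample defaults to Image.BILINEAR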
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineFromPointsLS(src,dst,new_size,filter=BILINEAR, normalize=True):  
    '''
     An affine transform that will rotate, translate, and scale to map one 
     set of points to the other. For example, to align eye coordinates in face images.

     Find a transform (a,b,tx,ty) such that it maps the source points to the 
     destination points::

         a*x1-b*y1+tx = x2
         b*x1+a*y1+ty = y2

     This method minimizes the squared error to find an optimal fit between the 
     points.

     @param src: a list of Points in the source image.
     @param dst: a list of Points in the destination image.
     @param new_size: new size for the image.
     @param filter: PIL filter to use.
    '''  
    if normalize:
        # Normalize Points
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)

    # Compute the transformation parameters
    A = []
    b = []
    for i in range(len(src)):
        A.append([src[i].X(),-src[i].Y(),1,0])
        A.append([src[i].Y(), src[i].X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())

    A = array(A)
    b = array(b)

    result,resids,rank,s = lstsq(A,b)

    a,b,tx,ty = result    
    # Create the transform matrix
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')

    if normalize:
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))

    return AffineTransform(matrix,new_size,filter)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineFromPointsRANSAC(src,dst,new_size,filter=BILINEAR, normalize=True,tol=0.15):
    '''
    An affine transform that will rotate, translate, and scale to map one 
    set of points to the other. For example, to align eye coordinates in face images.

    Find a transform (a,b,tx,ty) such that it maps the source points to the 
    destination points::

        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2

    This method finds an optimal fit between the points. Instead of an LS
    solver, the RANSAC solver is used to produce a transformation that is
    robust to outliers.

    @param src: a list of Points in the source image.
    @param dst: a list of Points in the destination image.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    if normalize:
        # Normalize Points
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)

    # Compute the transformation parameters
    A = []
    b = []
    for i in range(len(src)):
        A.append([src[i].X(),-src[i].Y(),1,0])
        A.append([src[i].Y(), src[i].X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())

    A = array(A)
    b = array(b)

    result = RANSAC(A,b,tol=tol,group=2)

    #print result,resids,rank,s 

    a,b,tx,ty = result    
    # Create the transform matrix
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')

    if normalize:
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))

    return AffineTransform(matrix,new_size,filter)
Project: nimo    Author: wolfram2012    | Project source | File source
def AffineFromPointsLMeDs(src,dst,new_size,filter=BILINEAR, normalize=True):
    '''
    An affine transform that will rotate, translate, and scale to map one 
    set of points to the other. For example, to align eye coordinates in face images.

    Find a transform (a,b,tx,ty) such that it maps the source points to the 
    destination points::

        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2

    This method finds an optimal fit between the points. Instead of an LS
    solver, the LMeDs (least median of squares) solver is used to produce a
    transformation that is robust to outliers.

    @param src: a list of Points in the source image.
    @param dst: a list of Points in the destination image.
    @param new_size: new size for the image.
    @param filter: PIL filter to use.
    '''
    if normalize:
        # Normalize Points
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)

    # Compute the transformation parameters
    A = []
    b = []
    for i in range(len(src)):
        A.append([src[i].X(),-src[i].Y(),1,0])
        A.append([src[i].Y(), src[i].X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())

    A = array(A)
    b = array(b)

    result = LMeDs(A,b)

    #print result,resids,rank,s 

    a,b,tx,ty = result    
    # Create the transform matrix
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')

    if normalize:
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))

    return AffineTransform(matrix,new_size,filter)
Project: mlstudy_week7    Author: ByungKeon-Ko    | Project source | File source
def create_ns (tmp_imgpath, cnt_ns ) :
    global pyramids

    tmp_img = Image.open("%s/%s" %(coco_path, tmp_imgpath), 'r' )
    pyramids = list( pyramid_gaussian( tmp_img, downscale=math.sqrt(2) ) )

    for i in range ( len(pyramids) ):
        if min( pyramids[i].shape[0], pyramids[i].shape[1] ) < MinFace :
            del pyramids[i:]
            break

    # for j in range(4) :
    for j in range(36) :
        # creating random index
        img_index = random.randint(0, len(pyramids)-1 )
        tmp_patch_num = ( pyramids[img_index].shape[0] - 12 + 1) * ( pyramids[img_index].shape[1] - 12 + 1)
        rand_index = random.randint(0, tmp_patch_num)

        # x, y position decoding
        row_max = pyramids[img_index].shape[0]
        col_max = pyramids[img_index].shape[1]
        row = 0
        col = rand_index

        while ( col >= col_max - 12 +1 ) :
            row = row + 1
            col = col - (col_max-12+1)

        flag = 0
        # Rejecting Black and White image
        tmp_ns = pyramids[img_index][row:row+12, col:col+12]
        if not len(tmp_ns.shape)==3 :
            print " Gray Image. Skip "
            return 0

        # Rejecting Positive Samples
        scale_factor = math.sqrt(2)**img_index

        tmp_ns = pyramids[img_index][row:row+12, col:col+12]
        tmp_ns = Image.fromarray((tmp_ns*255.0).astype(np.uint8) )
        # tmp_ns = tmp_ns.resize( (12,12), Image.BICUBIC )
        tmp_ns = tmp_ns.resize( (12,12), Image.BILINEAR )
        tmp_ns.save("%s/ns-%s.jpg" %(ns_path, cnt_ns+j) )

    return 1

# -----------------------------------------
Project: mlstudy_week7    Author: ByungKeon-Ko    | Project source | File source
def create_ns (tmp_imgpath, cnt_ns, network_list, threshold1, threshold2 ) :

    tmp_img = Image.open("%s/%s" %(coco_path, tmp_imgpath), 'r' )
    org_img = Image.open("%s/%s" %(coco_path, tmp_imgpath), 'r' )

    down_scale = 1
    if (max(tmp_img.size[0], tmp_img.size[1]) > 3000) :
        down_scale = 6
    elif (max(tmp_img.size[0], tmp_img.size[1]) > 2500) :
        down_scale = 5
    elif (max(tmp_img.size[0], tmp_img.size[1]) > 2000) :
        down_scale = 4
    elif (max(tmp_img.size[0], tmp_img.size[1]) > 1500) :
        down_scale = 3
    elif (max(tmp_img.size[0], tmp_img.size[1]) > 1000) :
        down_scale = 2
    elif (max(tmp_img.size[0], tmp_img.size[1]) > 750) :
        down_scale = 1.5
    size = tmp_img.size

    resize_ratio = float(MinFace)/ 12. * down_scale
    try :
        tmp_img = tmp_img.resize( (int(size[0]/resize_ratio), int(size[1]/resize_ratio)), Image.BILINEAR )
    except IOError :
        sys.exit("truncated byte error!")

    false_pos_annot = image_search.detect_pos(network_list, tmp_img, threshold1, threshold2, 'aflw', resize_ratio )

    if len(false_pos_annot) <= 0 :
        return 0

    # if type(false_pos_annot)==list :
    #   false_pos_annot = image_search.apply_nms( false_pos_annot )
    # else :
    #   false_pos_annot = image_search.apply_nms( false_pos_annot.tolist() )

    # image_search.save_annot(false_pos_annot)
    cnt_save = 0
    for j in xrange( len(false_pos_annot) ) :
        if net_name == '24net' :
            path = "%s/NS_det24_2/ns-%d.jpg" %(base_path, cnt_ns+cnt_save)
            org_img = org_img.convert('RGB')
            image_search.save_patch(org_img, false_pos_annot[j], path, net_name)
            cnt_save = cnt_save + 1

        elif net_name == '48net' :
            path = "%s/NS_det48_2/ns-%d.jpg" %(base_path, cnt_ns+cnt_save)
            org_img = org_img.convert('RGB')
            image_search.save_patch(org_img, false_pos_annot[j], path, net_name)
            cnt_save = cnt_save + 1

    return cnt_save

# -----------------------------------------