Python PIL.Image module: BICUBIC code examples

The following 50 code examples, extracted from open-source Python projects, show how to use PIL.Image.BICUBIC.
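
Before the project samples, here is a minimal sketch of the pattern they all share: passing Image.BICUBIC as the resampling filter to resize(), rotate() or transform(). The file names and sizes below are placeholders, not taken from any of the listed projects.

from PIL import Image

# Open an image and resample it bicubically to a fixed size.
# 'input.jpg' and 'output.jpg' are placeholder paths.
img = Image.open('input.jpg')
resized = img.resize((256, 256), Image.BICUBIC)

# The same filter can be passed to geometric transforms such as rotate().
# Recent Pillow releases also expose it as Image.Resampling.BICUBIC.
rotated = img.rotate(30, resample=Image.BICUBIC, expand=True)

resized.save('output.jpg')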

Project: DeblurGAN | Author: KupynOrest
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSizeX, opt.loadSizeY]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Project: road_simulator | Author: vinzeebreak
def call(self, img):

        if img is None:
            raise ValueError('img is None')

        width, height = img.size
        sym = img.copy()

        symmetry = False
        if random() < self.proba:
            from_points = [(0, 0), (width-1, 0), (width-1, height-1), (0, height-1)]
            new_points = [(width-1, 0), (0, 0), (0, height-1), (width-1, height-1)]
            coeffs = find_coeffs(new_points, from_points)
            sym = sym.transform((width, height), Image.PERSPECTIVE, coeffs, Image.BICUBIC)
            symmetry = True
        return sym, symmetry
Project: DeblurGAN | Author: KupynOrest
def __getitem__(self, index):
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        AB = AB.resize((self.opt.loadSizeX * 2, self.opt.loadSizeY), Image.BICUBIC)
        AB = self.transform(AB)

        w_total = AB.size(2)
        w = int(w_total / 2)
        h = AB.size(1)
        w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))

        A = AB[:, h_offset:h_offset + self.opt.fineSize,
               w_offset:w_offset + self.opt.fineSize]
        B = AB[:, h_offset:h_offset + self.opt.fineSize,
               w + w_offset:w + w_offset + self.opt.fineSize]

        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path}
Project: PrivacyScore | Author: PrivacyScore
def pixelize_screenshot(screenshot, screenshot_pixelized, target_width=390, pixelsize=3):
    """
    Thumbnail a screenshot to `target_width` and pixelize it.

    :param screenshot: Screenshot to be thumbnailed and pixelized
    :param screenshot_pixelized: File to which the result should be written
    :param target_width: Width of the final thumbnail
    :param pixelsize: Size of the final pixels
    :return: None
    """
    if target_width % pixelsize != 0:
        raise ValueError("pixelsize must divide target_width")

    img = Image.open(screenshot)
    width, height = img.size
    if height > width:
        img = img.crop((0, 0, width, width))
        height = width
    undersampling_width = target_width // pixelsize
    ratio = width / height
    new_height = int(undersampling_width / ratio)
    img = img.resize((undersampling_width, new_height), Image.BICUBIC)
    img = img.resize((target_width, new_height * pixelsize), Image.NEAREST)
    img.save(screenshot_pixelized, format='png')
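
A hypothetical call using the default target_width and pixelsize shown above; both file names are placeholders:

    pixelize_screenshot('screenshot.png', 'screenshot_pixelized.png')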
Project: 1-tk1-zener-learner | Author: mlennox
def crop_resize(img, dimension):
    inv_img = ImageOps.invert(img.convert("RGB"))
    # returns left, upper, right, lower
    left, upper, right, lower = inv_img.getbbox()
    width = right - left
    height = lower - upper
    if width > height:
        # we want to add half the difference between width and height
        # to the upper and lower dimension
        padding = int(math.floor((width - height) / 2))
        upper -= padding
        lower += padding
    else:
        padding = int(math.floor((height - width) / 2))
        left -= padding
        right += padding

    img = img.crop((left, upper, right, lower))

    # Image.LANCZOS
    # Image.BICUBIC
    return img.resize((dimension, dimension), Image.LANCZOS)


# pulls together all the methods to distort and finalise the image
Project: flickr-crawler | Author: chenusc11
def create_wallpaper(screen, urls, size=(100, 100), randomise=False):
    if randomise:
        random.shuffle(urls)

    wallpaper = Image.new("RGB", screen, "blue")

    width = int(math.ceil(float(screen[0]) / size[0]))
    height = int(math.ceil(float(screen[1]) / size[1]))

    offset = [0,0]
    for i in xrange(height):
        y = size[1] * i
        for j in xrange(width):
            x = size[0] * j
            photo = load_photo(urls.pop())
            if photo.size != size:
                photo = photo.resize(size, Image.BICUBIC)
            wallpaper.paste(photo, (x, y))
            del photo
    return wallpaper
Project: ATX | Author: NetEaseGame
def _adb_screencap(self, scale=1.0):
        """
        capture screen with adb shell screencap
        """
        remote_file = tempfile.mktemp(dir='/data/local/tmp/', prefix='screencap-', suffix='.png')
        local_file = tempfile.mktemp(prefix='atx-screencap-', suffix='.png')
        self.shell('screencap', '-p', remote_file)
        try:
            self.pull(remote_file, local_file)
            image = imutils.open_as_pillow(local_file)
            if scale is not None and scale != 1.0:
                image = image.resize([int(scale * s) for s in image.size], Image.BICUBIC)
            rotation = self.rotation()
            if rotation:
                method = getattr(Image, 'ROTATE_{}'.format(rotation*90))
                image = image.transpose(method)
            return image
        finally:
            self.remove(remote_file)
            os.unlink(local_file)
Project: supic | Author: Hirico
def get_image(self, path):
        im = Image.open(path)
        height = im.size[1]
        width = im.size[0]
        mark = False
        if width < height:
            mark = True
            height = width
        if height < self.height:
            return None, None, None

        if mark:
            im = im.rotate(90)

        box = [0,0,self.width,self.height]

        HR4 = im.crop(box)
        HR2 = HR4.resize((int(self.width /2),int(self.height / 2)),Image.BICUBIC)
        LR = HR4.resize((int(self.width / 4), int(self.height / 4)), Image.BICUBIC)
        return asarray(LR.convert('L')), asarray(HR2.convert('L')), asarray(HR4.convert('L'))
Project: Roomba980-Python | Author: NickWaterton
def ScaleRotateTranslate(self, image, angle, center=None, new_center=None,
                             scale=None, expand=False):
        '''
        experimental - not used yet
        '''
        if center is None:
            return image.rotate(angle, expand)
        angle = -angle / 180.0 * math.pi
        nx, ny = x, y = center
        if new_center != center:
            (nx, ny) = new_center
        sx = sy = 1.0
        if scale:
            (sx, sy) = scale
        cosine = math.cos(angle)
        sine = math.sin(angle)
        a = cosine / sx
        b = sine / sx
        c = x - nx * a - ny * b
        d = -sine / sy
        e = cosine / sy
        f = y - nx * d - ny * e
        return image.transform(image.size, Image.AFFINE,
                               (a,b,c,d,e,f), resample=Image.BICUBIC)
Project: Roomba980-Python | Author: NickWaterton
def ScaleRotateTranslate(self, image, angle, center=None, new_center=None,
                             scale=None, expand=False):
        '''
        experimental - not used yet
        '''
        if center is None:
            return image.rotate(angle, expand)
        angle = -angle / 180.0 * math.pi
        nx, ny = x, y = center
        if new_center != center:
            (nx, ny) = new_center
        sx = sy = 1.0
        if scale:
            (sx, sy) = scale
        cosine = math.cos(angle)
        sine = math.sin(angle)
        a = cosine / sx
        b = sine / sx
        c = x - nx * a - ny * b
        d = -sine / sy
        e = cosine / sy
        f = y - nx * d - ny * e
        return image.transform(image.size, Image.AFFINE,
                               (a,b,c,d,e,f), resample=Image.BICUBIC)
Project: AutomatorX | Author: xiaoyaojjian
def _adb_screencap(self, scale=1.0):
        """
        capture screen with adb shell screencap
        """
        remote_file = tempfile.mktemp(dir='/data/local/tmp/', prefix='screencap-', suffix='.png')
        local_file = tempfile.mktemp(prefix='atx-screencap-', suffix='.png')
        self.shell('screencap', '-p', remote_file)
        try:
            self.pull(remote_file, local_file)
            image = Image.open(local_file)
            image.load() # because Image is a lazy load function
            if scale is not None and scale != 1.0:
                image = image.resize([int(scale * s) for s in image.size], Image.BICUBIC)
            rotation = self.rotation()
            if rotation:
                method = getattr(Image, 'ROTATE_{}'.format(rotation*90))
                image = image.transpose(method)
            return image
        finally:
            self.remove(remote_file)
            os.unlink(local_file)
Project: budgie-extras | Author: UbuntuBudgie
def create_minutes(hx):
    tmp_min = os.path.join(tmp, "tmp_min.svg")
    minute = svgwrite.Drawing(
        filename=tmp_min, size=("100px", "100px")
    )
    minute.add(minute.line(
        start=(50, 45), end=(50, 10), stroke_width="3",
        stroke=hex2rgb(hx),
    )
    )
    minute.save()
    temp_minspng = tmp_min.replace(".svg", ".png")
    create_png(tmp_min, temp_minspng)
    # minutes
    min_source = Image.open(temp_minspng)
    for n in range(60):
        rotate = n * 6
        newminspath = os.path.join(mins_path, str(n) + ".png")
        new_min = min_source.rotate(
            rotate, resample=Image.BICUBIC, expand=False
        )
        new_min.save(newminspath)
Project: budgie-extras | Author: UbuntuBudgie
def create_hours(hx):
    tmp_hr = os.path.join(tmp, "tmp_hr.svg")
    hr = svgwrite.Drawing(
        filename=tmp_hr, size=("100px", "100px")
    )
    hr.add(hr.line(
        start=(50, 45), end=(50, 20), stroke_width="3",
        stroke=hex2rgb(hx),
    )
    )
    hr.save()
    temp_hrspng = tmp_hr.replace(".svg", ".png")
    create_png(tmp_hr, temp_hrspng)
    # hours
    hr_source = Image.open(temp_hrspng)
    for n in range(60):
        rotate = n * 6
        newhrpath = os.path.join(hrs_path, str(n) + ".png")
        new_hr = hr_source.rotate(
            rotate, resample=Image.BICUBIC, expand=False
        )
        new_hr.save(newhrpath)
Project: Comicolorization | Author: DwangoMediaVillage
def _make_input_panel_image(panel_image, input_panel_rect, input_width):
    """
    Make input image for neural network model
    :param panel_image: source panel image
    :param input_panel_rect: rectangle calculated by _calc_input_panel_rect
    :param input_width: width of input image for neural network model
    :return: input image for neural network model
    """
    x, y, _w, _h = (int(value) for value in input_panel_rect)
    w, h = _w - x, _h - y

    img = panel_image.convert('L')
    img = img.resize((w, h), Image.BICUBIC)

    bg = Image.new('RGB', (input_width, input_width), '#ffffff')
    bg.paste(img, (x, y))
    return bg
Project: inyourface | Author: yacomink
def manipulate_frame(self, frame_image, faces, index):
        # Instantiates a client
        googly_eye = Image.open(self.__class__.get_os_path('overlays/eye.png'))

        for face in faces:

            for side in ('left', 'right'):

                ((lcx, lcy), (ex, ey), (rcx, rcy)) = face.get_eye_coords(side)

                ew = int(1.5 * math.hypot(rcx - lcx, rcy - lcy))

                pasted = googly_eye.rotate(random.randint(0, 360), Image.BICUBIC).resize((ew, ew), Image.BICUBIC)
                frame_image.paste(pasted, (int(ex - ew/2), int(ey - ew/2)), pasted)

        return frame_image
Project: inyourface | Author: yacomink
def cry_frame(self, frame_image, faces, index):
        # Instantiates a client
        tear = Image.open(self.__class__.get_os_path('overlays/tearblood.png'))

        lowest = 0

        for face in faces:

            for side in ('left', 'right'):

                ((lcx, lcy), (ex, ey), (rcx, rcy)) = face.get_eye_coords(side)

                ew = int(1.25 * math.hypot(rcx - lcx, rcy - lcy))

                pasted = tear.resize((ew, ew), Image.BICUBIC)
                left_y = int(lcy + (index * ew * 1.5) + (ew * .75))
                right_y = int(rcy + (index * ew * 1.5) + (ew * .5) )
                frame_image.paste(pasted, (int(lcx - ew/2), left_y), pasted)
                frame_image.paste(pasted, (int(rcx - ew/2), right_y), pasted)
                lowest = max(left_y, right_y)

        return lowest
Project: inyourface | Author: yacomink
def cry_frame(self, frame_image, faces, index):
        # Instantiates a client
        tear = Image.open(self.__class__.get_os_path('overlays/tear.png'))

        lowest = 0

        for face in faces:

            for side in ('left', 'right'):

                ((lcx, lcy), (ex, ey), (rcx, rcy)) = face.get_eye_coords(side)

                ew = int(1.25 * math.hypot(rcx - lcx, rcy - lcy))

                pasted = tear.resize((ew, ew), Image.BICUBIC).rotate(face.angles.tilt, Image.BICUBIC)
                left_y = int(lcy + (index * ew * 1.5) + (ew * .5))
                right_y = int(rcy + (index * ew * 1.5) + (ew * .75) )
                frame_image.paste(pasted, (int(lcx - ew/2), left_y), pasted)
                frame_image.paste(pasted, (int(rcx - ew/2), right_y), pasted)
                lowest = max(left_y, right_y)

        return lowest
Project: inyourface | Author: yacomink
def manipulate_frame(self, frame_image, faces, index):
        # Instantiates a client
        hand = Image.open(self.__class__.get_os_path('overlays/thinking-hand.png'))

        for face in faces:

            ((ex,ey), (rx,ry)) = face.get_paired_landmark_coords('chin_%s_gonion')

            ew = int((rx - ex) * 0.6)

            start = -1 * ew
            end = ey - ew/3

            progress = float(index+1) / float(self.total_frames * 0.7)
            height = start + progress * (end - start)
            if (height > end):
                height = end

            pasted = hand.resize((ew, ew), Image.BICUBIC).rotate(face.angles.tilt, Image.BICUBIC)
            frame_image.paste(pasted, (int(ex), int(height)), pasted)

        return frame_image
Project: Imagyn | Author: zevisert
def skew_image(img, angle):
    """
    Skew image using some math
    :param img: PIL image object
    :param angle: Angle in radians (function doesn't do well outside the range -1 -> 1, but still works)
    :return: PIL image object
    """
    width, height = img.size
    # Get the width that is to be added to the image based on the angle of skew
    xshift = tan(abs(angle)) * height
    new_width = width + int(xshift)

    if new_width < 0:
        return img

    # Apply transform
    img = img.transform(
        (new_width, height),
        Image.AFFINE,
        (1, angle, -xshift if angle > 0 else 0, 0, 1, 0),
        Image.BICUBIC
    )

    return img
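
A hypothetical call, assuming img is an already opened PIL image; 0.3 is an arbitrary skew angle in radians within the recommended -1 to 1 range:

    skewed = skew_image(img, 0.3)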
Project: Tenma | Author: Tenma-Server
def optimize_image(image_path, output_quality, base_width):
   ''' Optimizes image and returns a filepath string '''

   img = Image.open(image_path)

   # Check that it's a supported format
   format = str(img.format)
   if format == 'PNG' or format == 'JPEG':
      if base_width < img.size[0]:
         wpercent = (base_width/float(img.size[0]))
         hsize = int((float(img.size[1])*float(wpercent)))
         img = img.resize((base_width,hsize), Image.BICUBIC)
      # The 'quality' option is ignored for PNG files
      img.save(image_path, quality=output_quality, optimize=True)

   return image_path


#==============================================================================
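
A hypothetical call to optimize_image with placeholder path, quality and width values:

    optimized_path = optimize_image('/tmp/cover.jpg', output_quality=85, base_width=1200)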
Project: RobotframeworkAuto-for-PEP_PRO | Author: xiaoyaojjian
def img(self, Local_initial_address, i, local_store_address):
        print Local_initial_address, i, local_store_address
        # tt = u'lvziqing'
        im = Image.open(Local_initial_address)
        text = time.ctime()
        width, height = im.size
        print width, height
        txt = Image.new('RGB', im.size, (0, 0, 0, 0))
        text_width = (txt.size[0] - 280)
        text_height = (txt.size[1] - 130)
        # watermark = txt.resize((text_width,text_height), Image.ANTIALIAS)
        draw = ImageDraw.Draw(txt, 'RGBA')  # drawing context for the watermark layer
        draw.text((text_width, text_height),
                  text, fill=(255,255,255))
        watermark = txt.rotate(23, Image.BICUBIC)
        alpha = watermark.split()[2]
        alpha = ImageEnhance.Brightness(alpha).enhance(0.50)
        watermark.putalpha(alpha)
        a = local_store_address + 'ceshi' + str(i) + '.jpg'
        Image.composite(watermark, im, watermark).save(a, 'JPEG')
        return a
Project: pytorch_60min_blitz | Author: kyuhyoung
def __getitem__(self, index):

        fn_img, target = self.li_fn_img_classid[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        #img = Image.fromarray(img)
        img = Image.open(fn_img).convert('RGB')
        #img.re
        #margin_x, margin_y = index % 5, (index ** 2) % 5
        margin_x, margin_y = 3, 3
        size_new = tuple(map(sum, zip(img.size, (-margin_x, -margin_y))))
        #size_new = img.size - (margin_x, margin_y)
        img = img.resize(size_new, Image.BICUBIC)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
Project: CycleGANwithPerceptionLoss | Author: EliasVansteenkiste
def __getitem__(self, index):
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC)
        AB = self.transform(AB)

        w_total = AB.size(2)
        w = int(w_total / 2)
        h = AB.size(1)
        w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))
        h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))

        A = AB[:, h_offset:h_offset + self.opt.fineSize,
               w_offset:w_offset + self.opt.fineSize]
        B = AB[:, h_offset:h_offset + self.opt.fineSize,
               w + w_offset:w + w_offset + self.opt.fineSize]

        if (not self.opt.no_flip) and random.random() < 0.5:
            idx = [i for i in range(A.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            A = A.index_select(2, idx)
            B = B.index_select(2, idx)

        return {'A': A, 'B': B,
                'A_paths': AB_path, 'B_paths': AB_path}
Project: CycleGANwithPerceptionLoss | Author: EliasVansteenkiste
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Project: pytorch-CycleGAN-and-pix2pix | Author: junyanz
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Project: CNN-Glasses-Remover | Author: JubilantJerry
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
    if (scale is None) and (center is None):
        return image.rotate(angle=angle, resample=resample)
    nx,ny = x,y = center
    sx=sy=1.0
    if new_center:
        (nx,ny) = new_center
    if scale:
        (sx,sy) = (scale, scale)
    cosine = math.cos(angle)
    sine = math.sin(angle)
    a = cosine/sx
    b = sine/sx
    c = x-nx*a-ny*b
    d = -sine/sy
    e = cosine/sy
    f = y-nx*d-ny*e
    return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
Project: dancedeets-monorepo | Author: mikelambert
def _generate_image(event):
    image_data = event_image.get_image(event)
    im = Image.open(StringIO.StringIO(image_data))
    image_size = (im.width, im.height)
    scale = tuple(1.0 * x / y for x, y in zip(full_size, image_size))

    # Generate the background-image that is blurred and backgrounds the main image
    max_scale = max(scale)
    background_image_new_size = tuple(int(round(max_scale * x)) for x in image_size)
    background_resized = im.resize(background_image_new_size, resample=Image.BICUBIC)
    background_blurred = background_resized.filter(ImageFilter.GaussianBlur(100))
    background_offset = tuple((x - y) / 2 for x, y in zip(full_size, background_image_new_size))

    # Generate the scaled-image that fits the frame exactly
    min_scale = min(scale)
    foreground_image_size = tuple(int(round(min_scale * x)) for x in image_size)
    foreground_resized = im.resize(foreground_image_size, resample=Image.BICUBIC)
    foreground_offset = tuple((x - y) / 2 for x, y in zip(full_size, foreground_image_size))

    target = Image.new('RGB', full_size)
    target.paste(background_blurred, background_offset)
    target.paste(foreground_resized, foreground_offset)

    return target
Project: paint_transfer_c92 | Author: Hiroshiba
def __call__(self, image: Image.Image, test):
        base_size = image.size

        rand = numpy.random.rand(1) if not test else 0.5

        short = rand * (self._max_short - self._min_short + 1) + self._min_short
        short = int(short)
        if short > self._max_short:
            short = self._max_short

        scale = max([short / image.size[0], short / image.size[1]])
        size_resize = (round(image.size[0] * scale), round(image.size[1] * scale))

        if base_size != size_resize:
            image = image.resize(size_resize, resample=Image.BICUBIC)

        return image
Project: Sleep-Early | Author: AliNL
def _adb_screencap(self, scale=1.0):
        """
        capture screen with adb shell screencap
        """
        remote_file = tempfile.mktemp(dir='/data/local/tmp/', prefix='screencap-', suffix='.png')
        local_file = tempfile.mktemp(prefix='atx-screencap-', suffix='.png')
        self.shell('screencap', '-p', remote_file)
        try:
            self.pull(remote_file, local_file)
            image = imutils.open_as_pillow(local_file)
            if scale is not None and scale != 1.0:
                image = image.resize([int(scale * s) for s in image.size], Image.BICUBIC)
            rotation = self.rotation()
            if rotation:
                method = getattr(Image, 'ROTATE_{}'.format(rotation*90))
                image = image.transpose(method)
            return image
        finally:
            self.remove(remote_file)
            os.unlink(local_file)
Project: deepanalytics_compe26_benchmark | Author: takagiwa-ss
def _load_rawdata(df, dir_images):
    '''Load the raw image data and return it as a 4D array of shape (len(df), 1, _img_len, _img_len).
    '''

    X = np.zeros((len(df), 1, _img_len, _img_len), dtype=np.float32)

    for i, row in df.iterrows():
        img = Image.open(os.path.join(dir_images, row.filename))
        img = img.crop((row.left, row.top, row.right, row.bottom))
        img = img.convert('L')
        img = img.resize((_img_len, _img_len), resample=Image.BICUBIC)

        # invert and rescale to float32 so the darkest pixel maps to 1 and the brightest to 0
        img = np.asarray(img, dtype=np.float32)
        b, a = np.max(img), np.min(img)
        X[i, 0] = (b-img) / (b-a) if b > a else 0

    return X
Project: generative_models | Author: j-min
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    transform_list = []
    if resize_crop == 'resize_and_crop':
        osize = [loadSize, loadSize]
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        transform_list.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, loadSize)))
        transform_list.append(transforms.RandomCrop(fineSize))

    if flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Project: mosaicshapes | Author: skiptomyliu
def save(self, path, dpi=300, is_continue=False):
        if self.is_diamond:
            diamond_img = util.mult_img_size(self.canvas_img.copy(), .5)
            diamond_img = diamond_img.rotate(-45, expand=False, resample=Image.BICUBIC)

            diamond_img = diamond_img.crop((
                (diamond_img.size[0] - self.target_size[0])/2 + self.pixels,
                (diamond_img.size[1] - self.target_size[1])/2 + self.pixels,
                self.target_size[0] + (diamond_img.size[0] - self.target_size[0])/2 - self.pixels,
                self.target_size[1] + (diamond_img.size[1] - self.target_size[1])/2 - self.pixels,
            ))
            diamond_img.save(path, "jpeg", icc_profile=self.og_image.info.get('icc_profile'),
                             quality=95, dpi=(dpi, dpi))
        else:
            grid_img = self.canvas_img.copy()
            if not is_continue:
                grid_img = self.crop_grid(grid_img, self.N)
            grid_img = util.restrain_img_size(grid_img, self.enlarge)
            grid_img.save(path, "jpeg", icc_profile=self.og_image.info.get('icc_profile'),
                          quality=95, dpi=(dpi,dpi))
Project: emojiGenerator | Author: jiuya
def getEmoji(self):
        img = Image.new("RGBA",self.imageSize,self.backColor)
        draw = ImageDraw.Draw(img)
        l = len(self.textList)

        for i in range(0,l):
            img_str = Image.new("RGBA",(len(self.textList[i])*128,128),self.backColor)
            draw = ImageDraw.Draw(img_str)
            (size,x0,y0,x1,y1) = self.cutEffectiveRange(self.textList[i],len(self.textList[i])*64,int(128/l))
            #(size,x0,y0,x1,y1) = self.cutEffectiveRange(self.textList[i],256,128/l)
            font = self.getFont(size)
            draw.text((x0,y0), self.textList[i], fill=self.fontColor, font=font)
            img_str.crop((0,0,x1,y1))
            if x1 > 128:
                img_str = img_str.transform(img_str.size,Image.AFFINE,(x1/128.0,0,0,0,1,0),Image.BICUBIC)
                image_paste_x = 0
            else:
                image_paste_x = int((128-x1)/2)
            if l != 1:
                img.paste(img_str,(image_paste_x,int((128/l)*i)))
            else:
                img.paste(img_str,(image_paste_x,int((128-y1)/2)))
        return img
Project: GAN_Liveness_Detection | Author: yunfan0621
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Project: tfutils | Author: neuroailab
def resize_img(image, shape):
    return np.array(Image.fromarray(image).resize(shape, Image.BICUBIC))
Project: tfutils | Author: neuroailab
def resize_img(image):
    return np.array(Image.fromarray(image).resize((NEW_HEIGHT, NEW_WIDTH), Image.BICUBIC))
Project: tfutils | Author: neuroailab
def resize_img(image):
    return np.array(Image.fromarray(image).resize((NEW_HEIGHT, NEW_WIDTH), Image.BICUBIC))
Project: PaintsPytorch | Author: orashi
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )

    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
Project: PaintsPytorch | Author: orashi
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )

    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
Project: PaintsPytorch | Author: orashi
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )

    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
Project: PaintsPytorch | Author: orashi
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )

    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
Project: PaintsPytorch | Author: orashi
def CreateDataLoader(opt):
    random.seed(opt.manualSeed)

    # folder dataset
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    def jitter(x):
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans
                          )

    assert dataset

    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers), drop_last=True)
Project: DeblurGAN | Author: KupynOrest
def __scale_width(img, target_width):
    ow, oh = img.size
    if (ow == target_width):
        return img
    w = target_width
    h = int(target_width * oh / ow)
    return img.resize((w, h), Image.BICUBIC)
Project: 1-tk1-zener-learner | Author: mlennox
def create_perspective(img, factor):
    img_size = img.size
    w = img_size[0]
    h = img_size[1]
    shifts = generate_random_shifts(img_size, factor)
    coeffs = find_coeffs(
        [(shifts[0][0], shifts[0][1]),
            (w + shifts[1][0], shifts[1][1]),
            (w + shifts[2][0], h + shifts[2][1]),
            (shifts[3][0], h + shifts[3][1])], [(0, 0), (w, 0), (w, h), (0, h)])
    return img.transform((w, h), Image.PERSPECTIVE, coeffs, Image.BICUBIC)


# due to rotation and/or perspective we will need to fill in the background
Project: ParlAI | Author: facebookresearch
def img_to_ascii(self, path):
        im = Image.open(path)
        im.thumbnail((60, 40), Image.BICUBIC)
        im = im.convert('L')
        asc = []
        for y in range(0, im.size[1]):
            for x in range(0, im.size[0]):
                lum = 255 - im.getpixel((x, y))
                asc.append(_greyscale[lum * len(_greyscale) // 256])
            asc.append('\n')
        return ''.join(asc)
Project: nimo | Author: wolfram2012
def resize(self, newSize):
        ''' Returns a resized version of the image. This is a convenience function.
        For more control, look at the Affine class for arbitrary transformations.
        @param newSize: tuple (new_width, new_height)
        @returns: a new pyvision image that is the resized version of this image.
        ''' 
        tmp = self.asPIL()
        if newSize[0] < self.size[0] or newSize[1] < self.size[1]:
            # because at least one dimension is being shrunk, we need to use the ANTIALIAS filter
            tmp = tmp.resize(newSize, ANTIALIAS)        
        else:
            #use bicubic interpolation
            tmp = tmp.resize(newSize, BICUBIC)

        return pyvision.Image(tmp)

    # def scale(self, scale):
    #     ''' Returns a scaled version of the image. This is a convenience function.
    #     For more control, look at the Affine class for arbitrary transformations.
    #     @param scale: a float indicating the scale factor
    #     @returns: a new pyvision image that is the scaled version of this image.
    #     ''' 
    #     w,h = self.size
    #     new_size = (int(round(scale*w)),int(round(scale*h)))
    #     return self.resize(new_size)

    # # def copy(self):
    # #     '''
    # #     Returns a new pv.Image which is a copy of (only) the current image.
    # #     Other internal data stored by the current pv.Image will NOT be copied.
    # #     This method uses cv.CloneImage so that the underlying image data will be
    # #     disconnected from the original data. (Deep copy)
    # #     '''
    # #     imgdat = self.asOpenCV()
    # #     imgdat2 = cv.CloneImage(imgdat)
    # #     return pv.Image(imgdat2)
Project: facade-segmentation | Author: jfemiani
def __init__(self, annotations=None, nrows=512, ncols=None, edge_width=EDGE_WIDTH):
        """

        :param annotations:  The LabelMe annotations
        :type annotations: Annotation
        """
        super(MultiLayerLabels, self).__init__()

        assert isinstance(annotations, Annotation)

        self.edge_width = dict()
        self.edge_width['facade'] = 15
        self.default_edge_width = edge_width
        self.annotations = annotations
        self.label_names = pyfacades.models.independant_12_layers.model.LABELS
        self.unknown_names = ['unknown', 'unlabeled']
        self.unknowns = [o.polygon for o in self.annotations if o.name in self.unknown_names]
        self.nlayers = len(self.label_names)
        self.nrows = nrows
        y_scale = nrows / float(annotations.imagesize.nrows)
        self.ncols = ncols if ncols is not None else int(y_scale*annotations.imagesize.ncols)

        self.data = np.zeros((self.nrows, self.ncols, self.nlayers+3), dtype=np.uint8)
        self.label_data = self.data[:, :, 3:]
        self.color_data = self.data[:, :, :3]
        self.set_colors(self.annotations.get_image().resize((self.ncols, self.nrows), resample=Image.BICUBIC))
Project: pehchaan | Author: achillesrasquinha
def predict(self):
        image = self.frame.image
        image.thumbnail(DHCD_INPUT_SIZE, Image.BICUBIC)

        arr   = _image_to_input(image)
        arr   = np.reshape(arr, (1, arr.size))
        out   = self.model.predict(arr)
        sym   = self.encoder.inverse_transform(out[0])

        self.frame.setOutput(sym)
Project: supic | Author: Hirico
def predict_single(input_image,path,scale, out_height, out_width):
    """
        TensorFlow op: process the input image and
        return the predicted 2x, 4x and 8x images
    """
    argument_sr.options.predict(1)
    factor = get_scale_factor(scale)

    hr2_predict, hr4_predict,hr8_predict = net.get_LasSRN(input_image)

    """
        choose which predicted image to use as the base
    """
    if factor == 1:
        print('dsf')
        image = input_image
    elif factor == 2:
        image = hr2_predict
    elif factor == 4:
        image = hr4_predict



    """
        resize to the requested output size
    """

    image = tf.image.resize_images(image[0], [out_height, out_width], method=tf.image.ResizeMethod.BICUBIC)
    image = tf.image.convert_image_dtype(image, dtype=tf.uint8)
    print(image)

    return image[:,:,0]
Project: supic | Author: Hirico
def predict_PIL(input_path, output_dir, out_width, out_height):
    """
    API

    Args:
        input_path: {String}    path of the input image
        output_dir: {String}    directory where the output image is saved
        out_width:  {Number}    target output width
        out_height: {Number}    target output height

    Returns:
        {String}
        On success: the path of the saved output image, e.g. ./test/887b1347e1a2.jpg
        On failure, one of the following error strings:
                 '!ERROR:input_path should be end with JPG,JPEG or PNG'  the input is not a JPG, JPEG or PNG file
                 '!ERROR:input_file do not exits'                        the input file does not exist
                 '!ERROR:output_dir do not exits'                        the output directory does not exist
                 '!ERROR:cannot identify image file 'xxx/xxx.jpg         the image cannot be opened
                 '!ERROR:image cannot be zoom by different scale parameters'  width and height imply inconsistent scale factors
                 '!ERROR: model error'                                    the model failed

    """
    res = _check_parameter(input_path, output_dir, out_height, out_width)
    if res[0] == '!':
        return res
    else:
        im = Image.open(input_path)
        im = im.resize((out_width,out_height),Image.BICUBIC)
        im.save(res[1])
        return res[1]
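
A hypothetical call with placeholder arguments; on success it returns the path of the saved image, otherwise one of the error strings listed in the docstring:

    result = predict_PIL('photo.jpg', './output/', 1024, 768)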