Python PIL.ImageFilter module: SHARPEN code examples

We extracted the following 19 code examples from open-source Python projects to illustrate how to use PIL.ImageFilter.SHARPEN.

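Before the project snippets, here is a minimal sketch of the basic pattern (the file names are placeholders). ImageFilter.SHARPEN is a fixed, built-in 3x3 sharpening kernel, and Image.filter() returns a new image rather than modifying the original, so the result must be assigned or saved.

from PIL import Image, ImageFilter

# Minimal sketch: open an image, apply the built-in SHARPEN kernel,
# and save the sharpened copy. The source image is left unchanged.
with Image.open('input.jpg') as im:            # 'input.jpg' is a placeholder path
    sharpened = im.filter(ImageFilter.SHARPEN)
    sharpened.save('sharpened.jpg')
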
Project: danube-delta | Author: honzajavorek
def import_image(src_filename, dest_filename):
    if os.path.isfile(dest_filename):
        click.echo('Already exists, skipping: {}'.format(dest_filename))
    else:
        with Image.open(src_filename) as image:
            image.thumbnail((IMAGE_MAX_SIZE, IMAGE_MAX_SIZE))

            try:
                image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image; keep it
            except ValueError:
                pass  # skip filtering for images which do not support it

            click.echo('Saving: {}'.format(dest_filename))
            options = dict(IMAGE_SAVE_OPTIONS)
            if is_animated(image):
                options.setdefault('save_all', True)
            image.save(dest_filename, image.format, **options)
Project: gougo | Author: amaozhao
def filters(im, detail=False, sharpen=False, **kwargs):
    """
    Pass the source image through post-processing filters.

    sharpen
        Sharpen the thumbnail image (using the PIL sharpen filter)

    detail
        Add detail to the image, like a mild *sharpen* (using the PIL
        ``detail`` filter).

    """
    if detail:
        im = im.filter(ImageFilter.DETAIL)
    if sharpen:
        im = im.filter(ImageFilter.SHARPEN)
    return im
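
A brief usage sketch of the helper above (the path is illustrative): DETAIL is applied first, then SHARPEN, and unrelated keyword arguments are absorbed by **kwargs.

from PIL import Image

im = Image.open('thumbnail.jpg')                # placeholder path
im = filters(im, detail=True, sharpen=True)     # applies DETAIL, then SHARPEN
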
Project: danube-delta | Author: honzajavorek
def create_thumbnail(filename):
    tn_dir = os.path.join(os.path.dirname(filename), THUMBNAILS_PATH)
    tn_filename = os.path.join(tn_dir, os.path.basename(filename))

    if not os.path.isfile(tn_filename):
        logger.info('Creating thumbnail for %s', filename)
        os.makedirs(tn_dir, exist_ok=True)
        image = Image.open(filename)
        image.thumbnail((IMG_MAX_SIZE, IMG_MAX_SIZE))
        image = image.filter(ImageFilter.SHARPEN)  # keep the sharpened copy; filter() does not work in place
        image.save(tn_filename, image.format, **THUMBNAIL_SAVE_OPTIONS)
    return tn_filename
Project: Imagyn | Author: zevisert
def sharpen(img):
    """
    Sharpen Image
    :param img: PIL image object
    :return: PIL image object
    """
    img = img.filter(ImageFilter.SHARPEN)
    return img


# Apply a smooth filter to the image to smooth edges (blurs)
Project: inception-face-shape-classifier | Author: adonistio
def sharpen_img(imdir,outdir):
    im = Image.open(imdir)
    out_filename = outdir
    im.filter(ImageFilter.SHARPEN).save(out_filename, 'JPEG', quality=100)
Project: ocr-with-django | Author: abarto
def post(self, request, *args, **kwargs):
        with PyTessBaseAPI() as api:
            with Image.open(request.FILES['image']) as image:
                sharpened_image = image.filter(ImageFilter.SHARPEN)
                api.SetImage(sharpened_image)
                utf8_text = api.GetUTF8Text()

        return JsonResponse({'utf8_text': utf8_text})
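
The same sharpen-then-OCR step can be sketched without Django or tesserocr, assuming pytesseract and the tesseract binary are installed (the file name is a placeholder):

from PIL import Image, ImageFilter
import pytesseract

# Hypothetical standalone version of the view above: sharpen a scan,
# then hand it to Tesseract for text extraction.
with Image.open('scan.png') as image:              # placeholder path
    sharpened = image.filter(ImageFilter.SHARPEN)
    print(pytesseract.image_to_string(sharpened))
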
Project: amazon | Author: parul1931
def parse_captcha(self, link, status):
        print "\n\n status in captcha : ", status
        print "\n link in captcha : ", link
        try:
            if status == 0:
                #proxies = ['http://43.242.104.43', 'http://115.113.43.215', 'http://115.113.43.215']
                #proxy = random.choice(proxies)
                proxy = urllib2.ProxyHandler({'http': 'http://14.142.4.33'})
                opener = urllib2.build_opener(proxy)
                header = ua.random
                print "\n header : ", header
                print "\n link : ", link
                opener.addheaders = [('User-agent', header)]
                data = opener.open(link).read()

                soup = BeautifulSoup(data, 'html.parser')
                div1 = soup.find("div", {"class": "a-row a-text-center"})
                if div1 is not None:
                    img = div1.find("img")
                    image = img["src"]
                    print "\n captcha.."
                    print "image : ", image
                    image = Image.open(StringIO(requests.get(image).content))
                    image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image
                    captcha = pytesseract.image_to_string(image)
                    print "captcha : ", captcha
                    values = {'field-keywords' : captcha}
                    data = urllib.urlencode(values)
                    req = urllib2.Request(link, data, {'User-agent': header})
                    resp = urllib2.urlopen(req)
                    the_page = resp.read()
                    self.parse_captcha(link, status)
                else:
                    status = 1
                    return
        except Exception as e:
            print "\n Exception : ", e
Project: amazon | Author: parul1931
def parse_captcha(self, link, status):
        try:
            if status == 0:
                #proxies = ['http://43.242.104.43', 'http://115.113.43.215', 'http://115.113.43.215']
                #proxy = random.choice(proxies)
                proxy = urllib2.ProxyHandler({'http': 'http://14.142.4.33'})
                opener = urllib2.build_opener()
                header = ua.random
                print "\n header : ", header
                print "\n link : ", link
                opener.addheaders = [('User-agent', header)]
                data = opener.open(link).read()

                soup = BeautifulSoup(data, 'html.parser')
                div1 = soup.find("div", {"class": "a-row a-text-center"})
                if div1 is not None:
                    print "\n\n status in captcha : ", status
                    print "\n link in captcha : ", link
                    img = div1.find("img")
                    image = img["src"]
                    print "\n captcha.."
                    print "image : ", image
                    image = Image.open(StringIO(requests.get(image).content))
                    image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image
                    captcha = pytesseract.image_to_string(image)
                    print "captcha : ", captcha
                    values = {'field-keywords' : captcha}
                    data = urllib.urlencode(values)
                    req = urllib2.Request(link, data, {'User-agent': header})
                    resp = urllib2.urlopen(req)
                    the_page = resp.read()
                    self.parse_captcha(link, status)
                else:
                    status = 1
                    return
        except Exception as e:
            print "\n Exception : ", e
Project: amazon | Author: parul1931
def parse_captcha(self, link, status):
        print "\n\n status in captcha : ", status
        print "\n link in captcha : ", link
        try:
            if status == 0:
                #proxies = ['http://43.242.104.43', 'http://115.113.43.215', 'http://115.113.43.215']
                #proxy = random.choice(proxies)
                proxy = urllib2.ProxyHandler({'http': 'http://14.142.4.33'})
                opener = urllib2.build_opener()
                header = ua.random
                print "\n header : ", header
                print "\n link : ", link
                opener.addheaders = [('User-agent', header)]
                data = opener.open(link).read()

                soup = BeautifulSoup(data, 'html.parser')
                div1 = soup.find("div", {"class": "a-row a-text-center"})
                if div1 is not None:
                    img = div1.find("img")
                    image = img["src"]
                    print "\n captcha.."
                    print "image : ", image
                    image = Image.open(StringIO(requests.get(image).content))
                    image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image
                    captcha = pytesseract.image_to_string(image)
                    print "captcha : ", captcha
                    values = {'field-keywords' : captcha}
                    data = urllib.urlencode(values)
                    req = urllib2.Request(link, data, {'User-agent': header})
                    resp = urllib2.urlopen(req)
                    the_page = resp.read()
                    self.parse_captcha(link, status)
                else:
                    status = 1
                    return
        except Exception as e:
            print "\n Exception : ", e
Project: amazon | Author: parul1931
def parse_captcha(self, link, status):
        print "\n\n status in captcha : ", status
        print "\n link in captcha : ", link
        try:
            if status == 0:
                #proxies = ['http://43.242.104.43', 'http://115.113.43.215', 'http://115.113.43.215']
                #proxy = random.choice(proxies)
                #proxy = urllib2.ProxyHandler({'http': 'http://115.113.43.215'})
                opener = urllib2.build_opener()
                header = ua.random
                print "\n header : ", header
                print "\n link : ", link
                opener.addheaders = [('User-agent', header)]
                data = opener.open(link).read()

                soup = BeautifulSoup(data, 'html.parser')
                div1 = soup.find("div", {"class": "a-row a-text-center"})
                if div1 is not None:
                    img = div1.find("img")
                    image = img["src"]
                    print "\n captcha.."
                    print "image : ", image
                    image = Image.open(StringIO(requests.get(image).content))
                    image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image
                    captcha = pytesseract.image_to_string(image)
                    print "captcha : ", captcha
                    values = {'field-keywords' : captcha}
                    data = urllib.urlencode(values)
                    req = urllib2.Request(link, data, {'User-agent': header})
                    resp = urllib2.urlopen(req)
                    the_page = resp.read()
                    self.parse_captcha(link, status)
                else:
                    status = 1
                    return
        except Exception as e:
            print "\n Exception : ", e
Project: amazon | Author: parul1931
def parse_captcha(link, status):
    if status == 0:
        opener = urllib2.build_opener()
        header = ua.random
        print "\n header : ", header
        print "\n link : ", link
        opener.addheaders = [('User-agent', header)]
        response = opener.open(link)
        data = response.read()
        # code = response.getcode()
        # log = "\n\n\n\n header : {header} \n url : {url} \n response : {response}".format(header=header, url=link, response=code)
        # logging.debug(log)
        #print "log : ", log

        soup = BeautifulSoup(data, 'html.parser')
        div1 = soup.find("div", {"class": "a-row a-text-center"})
        if div1 is not None:
            img = div1.find("img")
            image = img["src"]
            print "\n captcha.."
            print "image : ", image
            image = Image.open(StringIO(requests.get(image).content))
            image = image.filter(ImageFilter.SHARPEN)  # filter() returns a new image
            captcha = pytesseract.image_to_string(image)
            print "captcha : ", captcha
            values = {'field-keywords' : captcha}
            data = urllib.urlencode(values)
            req = urllib2.Request(link, data, {'User-agent': header})
            resp = urllib2.urlopen(req)
            the_page = resp.read()
            parse_captcha(link, status)
        else:
            status = 1
            return
    else:
        return
Project: Epsilon | Author: Capuno
async def cmd_info(message, parameters, recursion=0):
    async for msg in client.logs_from(message.channel, limit=25):
        try:
            if msg.attachments:
                image = Image.open(BytesIO(requests.get(msg.attachments[0]['url']).content)).filter(ImageFilter.SHARPEN)
                text = pytesseract.image_to_string(image)
                if not text:
                    e = discord.Embed(colour=0xB5434E)
                    e.description = "I just forgot how to read..."
                else:
                    e = discord.Embed(colour=0x43B581)
                    e.description = text
                await client.send_message(message.channel, embed=e)
                return

        except OSError:
            e = discord.Embed(colour=0xB5434E)
            e.description = "Image way big, are you trying to kill me?"
            await client.send_message(message.channel, embed=e)
            return
        except TypeError:
            e = discord.Embed(colour=0xB5434E)
            e.description = "Latest attachment is not a static image, try again."
            await client.send_message(message.channel, embed=e)
            return
        except:
            e = discord.Embed(colour=0xB5434E)
            e.description = "Error ocurred, not related to OSError or TypeError I guess."
            await client.send_message(message.channel, embed=e)
            return
    e = discord.Embed(colour=0xB5434E)
    e.description = "I can't find an image in the last 25 posts, that or I'm retarded."
    await client.send_message(message.channel, embed=e)
Project: plumeria | Author: sk89q
def sharpen(message, im):
    """
    Applies a sharpen effect.

    Example::

        /drawtext Hello there! | sharpen

    Requires an input image.
    """
    return im.filter(ImageFilter.SHARPEN)
Project: tensormsa_old | Author: TensorMSA
def resize_file_image(self, path, net_info, format_info, file_name, label):
        """
        load uploaded image and resize
        :param path:
        :return:
        """
        x_size = int(format_info['x_size'])
        y_size = int(format_info['y_size'])
        dataframe = net_info['dir']
        table = net_info['table']

        im = Image.open(path).convert('L')
        width = float(im.size[0])
        height = float(im.size[1])
        newImage = Image.new('L', (x_size, y_size), (255))

        if width > height:
            nheight = int(round((x_size / width * height), 0))
            img = im.resize((x_size, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wtop = int(round(((y_size - nheight) / 2), 0))
            newImage.paste(img, (4, wtop))
        else:
            nwidth = int(round((x_size / height * width), 0))
            if (nwidth == 0):
                nwidth = 1

            img = im.resize((nwidth, y_size), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wleft = int(round(((y_size - nwidth) / 2), 0))
            newImage.paste(img, (wleft, 4))
        width, height = newImage.size

        # save a preview to the Django static folder
        self.save_preview_image(newImage, dataframe, table, file_name, label)
        return newImage.getdata(), width, height
Project: tensormsa_old | Author: TensorMSA
def simple_resize(self, path, x_size, y_size):
        """
        simply resize image and return array
        :param path:
        :return:
        """
        x_size = int(x_size)
        y_size = int(y_size)
        im = Image.open(path).convert('L')
        width = float(im.size[0])
        height = float(im.size[1])
        newImage = Image.new('L', (x_size, y_size), (255))

        if width > height:
            nheight = int(round((x_size / width * height), 0))
            img = im.resize((x_size, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wtop = int(round(((y_size - nheight) / 2), 0))
            newImage.paste(img, (4, wtop))
        else:
            nwidth = int(round((x_size / height * width), 0))
            if (nwidth == 0):
                nwidth = 1

            img = im.resize((nwidth, y_size), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wleft = int(round(((y_size - nwidth) / 2), 0))
            newImage.paste(img, (wleft, 4))

        return newImage.getdata()
Project: tensormsa_old | Author: TensorMSA
def simple_resize(self, path, x_size, y_size):
        """
        simply resize image and return array
        :param path:
        :return:
        """

        im = Image.open(path).convert('L')
        width = float(im.size[0])
        height = float(im.size[1])
        newImage = Image.new('L', (x_size, y_size), (255))

        if width > height:
            nheight = int(round((x_size / width * height), 0))
            img = im.resize((x_size, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wtop = int(round(((y_size - nheight) / 2), 0))
            newImage.paste(img, (4, wtop))
        else:
            nwidth = int(round((x_size / height * width), 0))
            if (nwidth == 0):
                nwidth = 1

            img = im.resize((nwidth, y_size), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
            wleft = int(round(((y_size - nwidth) / 2), 0))
            newImage.paste(img, (wleft, 4))

        return list(newImage.getdata())
Project: tensormsa_old | Author: TensorMSA
def simple_resize(path, x_size, y_size):
    """
    simply resize image and return array
    :param path:
    :return:
    """

    im = Image.open(path).convert('L')
    width = float(im.size[0])
    height = float(im.size[1])
    newImage = Image.new('L', (x_size, y_size), (255))

    if width > height:
        nheight = int(round((x_size / width * height), 0))
        img = im.resize((x_size, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((y_size - nheight) / 2), 0))
        newImage.paste(img, (4, wtop))
    else:
        nwidth = int(round((x_size / height * width), 0))
        if (nwidth == 0):
            nwidth = 1

        img = im.resize((nwidth, y_size), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((y_size - nwidth) / 2), 0))
        newImage.paste(img, (wleft, 4))

    return list(newImage.getdata())
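
The three tensormsa_old helpers above and the mnist_CNN example below all follow the same recipe: scale the longer side down to the target size, sharpen, and paste the result onto a white canvas (the hard-coded offset of 4 comes from the original 20-pixel-in-a-28-pixel MNIST layout). Below is a generalized, illustrative sketch of that recipe which centres the paste on both axes. Image.LANCZOS is the current name for the deprecated Image.ANTIALIAS, and thumbnail() only shrinks, so unlike the originals this version never enlarges a small image.

from PIL import Image, ImageFilter

def fit_on_white_canvas(path, x_size, y_size):
    """Illustrative helper: shrink an image to fit (x_size, y_size) while
    keeping its aspect ratio, sharpen it, and centre it on a white canvas."""
    im = Image.open(path).convert('L')
    im.thumbnail((x_size, y_size), Image.LANCZOS)   # resizes in place, keeps aspect ratio
    im = im.filter(ImageFilter.SHARPEN)

    canvas = Image.new('L', (x_size, y_size), 255)  # white background
    left = (x_size - im.width) // 2
    top = (y_size - im.height) // 2
    canvas.paste(im, (left, top))
    return list(canvas.getdata())
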
Project: mnist_CNN | Author: junqiangchen
def imageprepare(imagepath):
    """
    This function returns the pixel values.
    The input is a PNG file location.
    """
    im = Image.open(imagepath).convert('L')
    width = float(im.size[0])
    height = float(im.size[1])
    newImage = Image.new('L', (28, 28), (255))  # creates white canvas of 28x28 pixels

    if width > height:  # check which dimension is bigger
        # Width is bigger. Width becomes 20 pixels.
        nheight = int(round((20.0 / width * height), 0))  # resize height according to ratio width
        if (nheight == 0):  # rare case but minimum is 1 pixel
            nheight = 1
            # resize and sharpen
        img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wtop = int(round(((28 - nheight) / 2), 0))  # calculate vertical position (top margin)
        newImage.paste(img, (4, wtop))  # paste resized image on white canvas
    else:
        # Height is bigger. Height becomes 20 pixels.
        nwidth = int(round((20.0 / height * width), 0))  # resize width according to ratio height
        if (nwidth == 0):  # rare case but minimum is 1 pixel
            nwidth = 1
            # resize and sharpen
        img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        wleft = int(round(((28 - nwidth) / 2), 0))  # calculate horizontal position (left margin)
        newImage.paste(img, (wleft, 4))  # paste resized image on white canvas

    # newImage.save("sample.png")

    tv = list(newImage.getdata())  # get pixel values

    # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
    tva = [(255 - x) * 1.0 / 255.0 for x in tv]
    return tva
    # print(tva)
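
A short usage sketch of imageprepare, assuming NumPy is available (the file name is a placeholder):

import numpy as np

pixels = imageprepare('digit.png')            # 784 values in [0, 1]; placeholder path
digit = np.array(pixels).reshape(28, 28)      # 28x28 array, ready for an MNIST-style model
print(digit.shape)                            # (28, 28)
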
Project: photoberry | Author: briandilley
def create_strip(self, resolution_ratio=None):
        """
        Combines the images in taken_photos into one
        :return: the combined image
        """

        if not resolution_ratio:
            resolution_ratio = self.strip_resolution_ratio

        padding = 40
        photo_width = int(self.photo_resolution[0] * resolution_ratio)
        photo_height = int(self.photo_resolution[1] * resolution_ratio)
        width = (photo_width * 2) + (padding * 4)
        height = (photo_height * self.picture_count) + (padding * (self.picture_count + 1))

        strip = Image.new('RGB', (width, height))
        canvas = ImageDraw.Draw(strip)
        canvas.rectangle((0, 0, width, height), fill=ImageColor.getcolor('#ffffff', 'RGB'))

        for i in range(0, self.picture_count):
            image = Image.open(self.pictures_taken[i])
            image = image.convert(mode='RGB')
            image = image.resize((photo_width, photo_height), resample=Image.LANCZOS)
            strip.paste(image, box=(
                padding,
                padding + (padding * i) + (photo_height * i)
            ))
            strip.paste(image, box=(
                padding + photo_width + padding + padding,
                padding + (padding * i) + (photo_height * i)
            ))
            del image

        strip = strip.transpose(Image.FLIP_LEFT_RIGHT)
        strip = strip.filter(ImageFilter.DETAIL)
        strip = strip.filter(ImageFilter.SHARPEN)

        (handle, file_name) = mkstemp(suffix='.jpg', prefix='photoberry-strip')
        os.close(handle)
        handle = open(file_name, 'wb')
        strip.save(handle, format='jpeg', quality=95, optimize=True)
        handle.close()
        del strip
        return file_name