Python PIL.ImageOps 模块,grayscale() 实例源码

我们从Python开源项目中,提取了以下6个代码示例,用于说明如何使用PIL.ImageOps.grayscale()

项目:PokeNoxBot    作者:tellomichmich    | 项目源码 | 文件源码
def UpdateTrainerLevel():
    """OCR the trainer-level digits from the screen and store the new level.

    Returns True when a plausible, non-decreasing level (1..40) was read and
    saved via SetTrainerLevel; False otherwise.
    """
    img = GetScreen()
    # (left, upper, right, lower) crop box around the level digits
    TrainerLevelZone = (65, 730, 65 + 45, 730 + 25)
    img = img.crop(TrainerLevelZone)
    img = ImageOps.grayscale(img)
    # NOTE(review): return value discarded — assumes HighContrast mutates img
    # in place with threshold 220; confirm against its definition.
    HighContrast(img, 220)
    img = ImageChops.invert(img)
    NewTrainerLevel = ImgToString(img, "0123456789")
    # Only the int() conversion can legitimately fail on bad OCR output;
    # the original bare `except:` also silently hid real errors raised by
    # GetTrainerLevel/SetTrainerLevel.
    try:
        NewTrainerLevel = int(NewTrainerLevel)
    except (ValueError, TypeError):
        return False
    if 1 <= NewTrainerLevel <= 40 and NewTrainerLevel >= GetTrainerLevel():
        SetTrainerLevel(NewTrainerLevel)
        return True
    return False
项目:IV    作者:collinmutembei    | 项目源码 | 文件源码
def apply_effects(image, effects):
    """Apply each named effect in *effects* to *image*, in order.

    :param image: a PIL Image to transform
    :param effects: iterable of effect-name strings (e.g. 'BLUR', 'HULK')
    :return: the image after all effects have been applied; the original
        image is returned unchanged when *effects* is empty (the previous
        version raised NameError in that case)
    :raises KeyError: on an unknown effect name (unchanged behavior)
    """
    for effect in effects:
        # Dictionary of all the available effects. Each value is a lazy
        # callable so only the requested effect is computed per iteration;
        # the original built every effect eagerly on every loop pass.
        all_effects = {
            'BLUR': lambda: image.filter(ImageFilter.BLUR),
            'CONTOUR': lambda: image.filter(ImageFilter.CONTOUR),
            'EMBOSS': lambda: image.filter(ImageFilter.EMBOSS),
            'SMOOTH': lambda: image.filter(ImageFilter.SMOOTH),
            'HULK': lambda: ImageOps.colorize(
                ImageOps.grayscale(image), (0, 0, 0, 0), '#00ff00'),
            'FLIP': lambda: ImageOps.flip(image),
            'MIRROR': lambda: ImageOps.mirror(image),
            'INVERT': lambda: ImageOps.invert(image),
            'SOLARIZE': lambda: ImageOps.solarize(image),
            'GREYSCALE': lambda: ImageOps.grayscale(image),
        }
        image = all_effects[effect]()
    return image
项目:pytorch.rl.learning    作者:moskomule    | 项目源码 | 文件源码
def _observation(self, frame):
    """Preprocess a raw observation: grayscale it and resize to
    (self.width, self.height), returning the result as a numpy array."""
    pil_frame = Image.fromarray(frame)
    gray = ImageOps.grayscale(pil_frame)
    resized = gray.resize((self.width, self.height))
    return np.array(resized)
项目:Comicolorization    作者:DwangoMediaVillage    | 项目源码 | 文件源码
def get_example(self, i):
    # type: (any) -> typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
    """
    :return: (RGB array [0~255], gray array [0~255], RGB array [0~255])
    """
    image = self.base[i]
    # Channels-first RGB, dropping any extra channels (e.g. alpha).
    rgb_array = numpy.asarray(image, dtype=self._dtype)
    rgb_array = rgb_array.transpose(2, 0, 1)[:3, :, :]
    # Grayscale as a single-channel (1, H, W) array.
    gray_array = numpy.asarray(ImageOps.grayscale(image), dtype=self._dtype)
    gray_array = gray_array[numpy.newaxis, :, :]
    return rgb_array, gray_array, rgb_array
项目:SDV-Summary    作者:Sketchy502    | 项目源码 | 文件源码
def tintImage(img, tint):
    """Return a copy of *img* recolored toward *tint*, keeping its alpha.

    Assumes *img* is RGBA: band index 3 is taken as the alpha channel.
    """
    tinted = colorize(grayscale(img), (0, 0, 0), tint)
    alpha = img.split()[3]
    tinted.putalpha(alpha)
    return tinted


# Crops sprite from Spritesheet
项目:deel    作者:uei    | 项目源码 | 文件源码
def received_message(self, m):
        """Handle one msgpack-encoded frame from the simulator.

        The payload carries an RGB screen image, a depth image, a scalar
        reward, and an end-of-episode flag. Depending on whether the agent
        has been initialized and whether the episode ended, this drives the
        global `workout` step machine via `AgentServer.mode` and sends the
        chosen action back over the socket.

        NOTE(review): `workout` and `AgentServer` are defined elsewhere in
        this project — their exact semantics are assumed from usage here.
        """
        global depth_image
        payload = m.data

        # Decode the msgpack payload: image / depth are raw byte blobs,
        # reward is a scalar, endEpisode a flag.
        dat = msgpack.unpackb(payload)
        screen = Image.open(io.BytesIO(bytearray(dat['image'])))
        x = screen
        reward = dat['reward']
        end_episode = dat['endEpisode']

        # Published as a module-level global for consumers elsewhere.
        depth_image = ImageOps.grayscale(Image.open(io.BytesIO(bytearray(dat['depth']))))

        if not self.agent_initialized:
            # First frame ever: start the agent and create the CSV log.
            self.agent_initialized = True

            AgentServer.mode='start'
            action = workout(x)
            self.send(str(action))
            with open(self.log_file, 'w') as the_file:
                the_file.write('cycle, episode_reward_sum \n')          
        else:
            # Wait until the previous frame has been fully processed.
            self.thread_event.wait()
            self.cycle_counter += 1
            self.reward_sum += reward

            if end_episode:
                # Finish the episode, then immediately start a new one and
                # append the episode's cumulative reward to the log.
                AgentServer.mode='end'
                workout(x)
                #self.agent.agent_end(reward)
                AgentServer.mode='start'
                #action = self.agent.agent_start(image)  # TODO
                action = workout(x)
                self.send(str(action))
                with open(self.log_file, 'a') as the_file:
                    the_file.write(str(self.cycle_counter) +
                                   ',' + str(self.reward_sum) + '\n')
                self.reward_sum = 0

            else:
                # Ordinary step: pick an action, send it, then let the agent
                # observe the consequences of the previous action.
                #action, rl_action, eps, Q_now, obs_array, returnAction = self.agent.agent_step(reward, image)
                #self.agent.agent_step_after(reward, image, rl_action, eps, Q_now, obs_array, returnAction)
                AgentServer.mode='step'
                ag,action, eps, Q_now, obs_array = workout(x)
                self.send(str(action))
                ag.step_after(reward, action, eps, Q_now, obs_array)

        # Signal that this frame is done so the next one can proceed.
        self.thread_event.set()