Python pygments module: token() example source code

We extracted the following 18 code examples from open-source Python projects to illustrate how to use pygments.token().
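
For reference, here is a minimal, self-contained sketch of the pygments.token basics these examples build on (token types are lazily created singletons that support attribute access and subtype containment checks):

import pygments
from pygments.lexers import PythonLexer
from pygments.token import Token

# lexing yields (token_type, text) pairs
for ttype, text in pygments.lex("x = 1  # answer\n", PythonLexer()):
    print(ttype, repr(text))

# token types form a hierarchy; `in` tests subtype containment
assert Token.Comment.Single in Token.Comment
assert Token.Name not in Token.Comment

# str() gives the dotted name used throughout these examples
assert str(Token.Literal.Number.Integer) == 'Token.Literal.Number.Integer'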

Project: SwiftKitten    Author: johncsnyder    | project source | file source
def get_blocks(tokens):
    """
    """
    block = []
    level = 0

    from pygments.token import Token

    for token, value in tokens:
        block.append((token,value))

        if value == ")":
            level += 1
        elif value == "(":
            level -= 1

        if level == 0:
            yield block[::-1]
            block = []
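
A minimal usage sketch for the generator above (the hand-written token pairs are illustrative; note the stream is already in reverse source order):

from pygments.token import Name, Punctuation

# (token, value) pairs for "foo(bar, baz).qux", reversed
reversed_tokens = [
    (Name, 'qux'), (Punctuation, '.'), (Punctuation, ')'),
    (Name, 'baz'), (Punctuation, ','), (Name, 'bar'),
    (Punctuation, '('), (Name, 'foo'),
]

for block in get_blocks(reversed_tokens):
    print([value for _, value in block])
# ['qux'], then ['.'], then ['(', 'bar', ',', 'baz', ')'], then ['foo']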
Project: leetcode    Author: thomasyimgit    | project source | file source
def highlight(self, block):
        """Method called on each block to highlight it content"""
        tokens = pygments.lex(block, self.python_lexer)
        if self.format_rst:
            from pygments.token import Token
            toks = []
            for token in tokens:
                if token[0] == Token.String.Doc and len(token[1]) > 6:
                    toks += pygments.lex(token[1][:3], self.python_lexer)
                    # parse doc string content by rst lexer
                    toks += pygments.lex(token[1][3:-3], self.rst_lexer)
                    toks += pygments.lex(token[1][-3:], self.python_lexer)
                elif token[0] == Token.Comment.Single:
                    toks.append((Token.Comment.Single, token[1][0]))
                    # parse comment content by rst lexer
                    # remove the extra newline added by rst lexer
                    toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
                else:
                    toks.append(token)
            tokens = toks
        return pygments.format(tokens, self.formatter)
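
The method above builds on the pygments.lex / pygments.format pair; a standalone sketch of that pattern (the lexer and formatter chosen here are assumptions, not the ones the project configures):

import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

block = 'def f(x):\n    """Return x squared."""\n    return x ** 2\n'
tokens = pygments.lex(block, PythonLexer())
print(pygments.format(tokens, TerminalFormatter()))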
Project: diff-highlight-tokens    Author: Met48    | project source | file source
def convert_to_lines(tokens):
    """
    Given a stream of tokens, yield lines as strings.

    Each output string is guaranteed to end with a newline.

    """
    line = []
    for token in tokens:
        text = get_token_text(token)
        line.append(text)
        if text == '\n':
            yield ''.join(line)
            line = []
    if line:
        line.append('\n')
        yield ''.join(line)
Project: yatta_reader    Author: sound88    | project source | file source
def highlight(self, block):
        """Method called on each block to highlight it content"""
        tokens = pygments.lex(block, self.python_lexer)
        if self.format_rst:
            from pygments.token import Token
            toks = []
            for token in tokens:
                if token[0] == Token.String.Doc and len(token[1]) > 6:
                    toks += pygments.lex(token[1][:3], self.python_lexer)
                    # parse doc string content by rst lexer
                    toks += pygments.lex(token[1][3:-3], self.rst_lexer)
                    toks += pygments.lex(token[1][-3:], self.python_lexer)
                elif token[0] == Token.Comment.Single:
                    toks.append((Token.Comment.Single, token[1][0]))
                    # parse comment content by rst lexer
                    # remove the extra newline added by rst lexer
                    toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
                else:
                    toks.append(token)
            tokens = toks
        return pygments.format(tokens, self.formatter)
Project: SwiftKitten    Author: johncsnyder    | project source | file source
def _serialize_token(self, pair):
        """Get string representation of (token, value) pair.
        """
        from pygments.token import Token
        token, value = pair
        # for literals, autocomplete only depends
        # on type of argument, not the value
        if token in [Token.Literal.Number.Float,
                     Token.Literal.Number.Integer,
                     Token.Literal.String]:
            return str(token)
        else:
            return value
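
Illustration of the idea (hypothetical pairs; str() of a token type is its dotted name):

from pygments.token import Token

print(str(Token.Literal.Number.Float))   # Token.Literal.Number.Float
# so (Token.Literal.Number.Float, "3.14") serializes to the type name
# "Token.Literal.Number.Float", while (Token.Name, "frame") keeps its
# text and serializes to "frame"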
Project: SwiftKitten    Author: johncsnyder    | project source | file source
def get_autocomplete_stub(lexer, text):
    """
    """
    entity = []

    from pygments.token import Token

    # ignored tokens
    ignored = [Token.Comment, Token.Text, Token.Text.Whitespace, Token.Comment.Single]
    filtered = lambda pair: pair[0] not in ignored  # pair = (token,value)

    tokens = filter(filtered, get_tokens_reversed(lexer, text))
    blocks = get_blocks(tokens)
    block = next(blocks, [])

    if len(block) == 1 and block[0][1] == ".":
        block = next(blocks, [])

        if len(block) > 0 and block[0][1] == "(":
            block_ = next(blocks, [])

            # the lone token before the "(" block should be the callee's name
            if len(block_) == 1 and block_[0][0] is Token.Name:
                return block_ + block

        return block

    return []
Project: jsoncut    Author: json-transformations    | project source | file source
def get_style(style=STYLE):
    """Load Pygments custom style."""
    def getattrs(obj, names):
        return reduce(getattr, names.split('_'), obj)
    return {getattrs(pygments.token, k): (v,) * 2 for k, v in style.items()}
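
A hypothetical usage sketch (the style mapping below is made up; keys are attribute paths under pygments.token with '_' in place of '.'):

from functools import reduce  # used by getattrs() on Python 3

import pygments.token

custom_style = {
    'Keyword': 'blue',
    'Name_Tag': 'red',
    'Literal_String': 'green',
}

print(get_style(custom_style))
# {Token.Keyword: ('blue', 'blue'),
#  Token.Name.Tag: ('red', 'red'),
#  Token.Literal.String: ('green', 'green')}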
Project: diff-highlight-tokens    Author: Met48    | project source | file source
def tokenize(text, lexer=None):
    """
    Split text into (token_type, token_text) pairs using the given lexer.

    When no lexer is given, the text is split into words instead.

    """
    if lexer is None:
        return [(pygments.token.Text, word) for word in split_words(text)]

    tokens = lexer.get_tokens(text)
    tokens = group_tokens(tokens)

    return tokens
Project: diff-highlight-tokens    Author: Met48    | project source | file source
def group_tokens(tokens):
    """
    Join and separate tokens to be more suitable for diffs.

    Transformations:
    - Empty tokens are removed
    - Text containing newlines is split to have the newline be one token
    - Other sequential whitespace tokens are joined
    - Token types that contain freeform text (i.e. comments, strings) are split into words

    """
    for token_type, group in itertools.groupby(tokens, get_token_type):
        if any(token_type in type_set for type_set in JOIN_TOKENS):
            text = ''.join(get_token_text(token) for token in group)
            group = [(token_type, text)]
        if any(token_type in type_set for type_set in WORD_TOKENS):
            group = (
                (token_type, word)
                for token in group
                for word in split_words(get_token_text(token))
            )
        # Split by newlines
        for token in group:
            text_parts = re.split(r'(\n)', get_token_text(token))
            for text_part in text_parts:
                # Empty tokens are discarded, to avoid confusing
                # difflib or highlighting empty regions
                if text_part:
                    yield (token_type, text_part)
Project: diff-highlight-tokens    Author: Met48    | project source | file source
def persist_highlighting(tokens):
    """
    Given a stream of tokens, yield tokens with additional
    START_HIGHLIGHT and END_HIGHLIGHT tokens inserted to persist
    highlighting across tokens with a newline '\n' as text.

    """
    should_be_highlighting = False
    is_highlighting = False
    for token in tokens:
        token_type = get_token_type(token)
        if token_type == 'START_HIGHLIGHT':
            assert not should_be_highlighting, 'Multiple attempts to start highlighting'
            should_be_highlighting = True
        elif token_type == 'END_HIGHLIGHT':
            assert should_be_highlighting, 'Attempt to end highlighting while not highlighting'
            should_be_highlighting = False
        else:
            if get_token_text(token) == '\n':
                if is_highlighting:
                    yield ('END_HIGHLIGHT', '')
                    is_highlighting = False
            elif is_highlighting is not should_be_highlighting:
                if should_be_highlighting:
                    yield ('START_HIGHLIGHT', '')
                else:
                    yield ('END_HIGHLIGHT', '')
                is_highlighting = should_be_highlighting
            yield token
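
A small demonstration (assuming the project's get_token_type / get_token_text helpers simply unpack (type, text) pairs):

def get_token_type(token):   # assumed helper
    return token[0]

def get_token_text(token):   # assumed helper
    return token[1]

tokens = [
    ('Text', 'foo'),
    ('START_HIGHLIGHT', ''),
    ('Text', 'bar'),
    ('Text', '\n'),
    ('Text', 'baz'),
    ('END_HIGHLIGHT', ''),
]
print(list(persist_highlighting(tokens)))
# the highlight is closed before the newline and re-opened after it:
# [('Text', 'foo'), ('START_HIGHLIGHT', ''), ('Text', 'bar'),
#  ('END_HIGHLIGHT', ''), ('Text', '\n'), ('START_HIGHLIGHT', ''),
#  ('Text', 'baz')]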
Project: diff-highlight-tokens    Author: Met48    | project source | file source
def fill_highlighting_text(tokens, highlight=FORMAT_INVERT, reset=FORMAT_RESET_INVERT):
    """
    Given a stream of tokens, yield tokens where the highlighting tokens
    have their formatting text filled in.

    """
    for token in tokens:
        token_type = get_token_type(token)
        if token_type == 'START_HIGHLIGHT':
            yield ('START_HIGHLIGHT', highlight)
        elif token_type == 'END_HIGHLIGHT':
            yield ('END_HIGHLIGHT', reset)
        else:
            yield token
Project: porcupine    Author: Akuli    | project source | file source
def header_callback(lexer, match):
        # highlight correct filetype names specially
        if match.group(1) in filetypes or match.group(1) == 'DEFAULT':
            yield (match.start(), pygments.token.Keyword, match.group(0))
        else:
            yield (match.start(), pygments.token.Text, match.group(0))
Project: porcupine    Author: Akuli    | project source | file source
def key_val_pair(key, value, key_token=pygments.token.Name.Builtin,
                     value_token=pygments.token.String):
        for regex, token in [(value, value_token),
                             (r'.*?', pygments.token.Name)]:
            yield (
                r'(%s)([^\S\n]*)(=)([^\S\n]*)(%s)$' % (key, regex),
                pygments.lexer.bygroups(
                    key_token, pygments.token.Text,
                    pygments.token.Operator, pygments.token.Text, token))
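
A hypothetical sketch of how such rule factories plug into a pygments RegexLexer (assumes key_val_pair above is available at module level; the lexer and the key/value patterns are made up for illustration):

import pygments.lexer
import pygments.token

class ConfigLexer(pygments.lexer.RegexLexer):
    """Made-up lexer highlighting 'key = value' lines."""
    tokens = {
        'root': [
            # the first rule from key_val_pair highlights an integer value
            # as a Number; its fallback rule marks any other value as a Name
            *key_val_pair('indent_size', r'\d+',
                          value_token=pygments.token.Number),
            (r'\s+', pygments.token.Text),
        ],
    }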
Project: porcupine    Author: Akuli    | project source | file source
def _on_config_changed(self, junk=None):
        # when the font family or size changes, self.textwidget['font']
        # also changes because it's a porcupine.textwidget.ThemedText widget
        fontobject = tkfont.Font(name=self.textwidget['font'], exists=True)
        font_updates = fontobject.actual()
        del font_updates['weight']     # ignore boldness
        del font_updates['slant']      # ignore italicness

        for (bold, italic), font in self._fonts.items():
            # fonts don't have an update() method
            for key, value in font_updates.items():
                font[key] = value

        # http://pygments.org/docs/formatterdevelopment/#styles
        # all styles seem to yield all token types when iterated over,
        # so we should always end up with the same tags configured
        style = pygments.styles.get_style_by_name(config['pygments_style'])
        for tokentype, infodict in style:
            # this doesn't use underline and border
            # i don't like random underlines in my code and i don't know
            # how to implement the border with tkinter
            key = (infodict['bold'], infodict['italic'])   # pep8 line length
            kwargs = {'font': self._fonts[key]}
            if infodict['color'] is None:
                kwargs['foreground'] = ''    # reset it
            else:
                kwargs['foreground'] = '#' + infodict['color']
            if infodict['bgcolor'] is None:
                kwargs['background'] = ''
            else:
                kwargs['background'] = '#' + infodict['bgcolor']

            self.textwidget.tag_config(str(tokentype), **kwargs)

            # make sure that the selection tag takes precedence over our
            # token tag
            self.textwidget.tag_lower(str(tokentype), 'sel')

    # handle things from the highlighting process
Project: gnatdashboard    Author: AdaCore    | project source | file source
def log(self, tokens, stream=sys.stdout):
        """Log the input token stream with the standard Python logging
        mecanism.

        PARAMETERS
            log_fn: the logging function to use
            tokens: the input tokens stream
        """

        assert self.formatter is not None, 'Internal error'
        print >> stream, pygments.format(tokens, self.formatter)
        stream.flush()
Project: gnatdashboard    Author: AdaCore    | project source | file source
def status_token(status):
        """Return the token to use for the given test case result status.

        RETURNS
            a pygments Token
        """

        return getattr(Token.TestResult, status, Token.Error)
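
Pygments token types create sub-types on demand via attribute access, so any capitalized status name maps to a child of Token.TestResult (assumes status_token above is callable as a plain function; the status strings are made up):

from pygments.token import Token

print(status_token('PASSED'))   # Token.TestResult.PASSED
print(status_token('FAILED'))   # Token.TestResult.FAILED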
Project: gnatdashboard    Author: AdaCore    | project source | file source
def format_testcase_diff(diff):
        """Format a testcase output diff.

        PARAMETERS
            diff: the diff content

        RETURNS
            a list of pygments' Tokens
        """

        def new_line_token():
            """Generate a new line token."""
            return Token.Whitespace, '\n'

        def indent_token():
            """Generate an indentation space token."""
            return Token.Whitespace, ' ' * 4

        tokens = []
        new_line = True

        # Because of logging prefixes, start the diff on a fresh line to
        # avoid misaligning its first line.
        tokens.append(new_line_token())

        for ttype, value in pygments.lex(diff, DiffLexer()):
            for subval in value.split('\n'):
                if new_line:
                    tokens.append(indent_token())

                new_line = not subval

                if subval:
                    tokens.append((ttype, subval))
                else:
                    tokens.append(new_line_token())

        return tokens
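
A small usage sketch (assumes format_testcase_diff above is available as a plain function, along with pygments' standard DiffLexer and Token):

import pygments
from pygments.lexers import DiffLexer
from pygments.token import Token

diff = "--- expected\n+++ actual\n-old line\n+new line\n"
for ttype, value in format_testcase_diff(diff):
    print(ttype, repr(value))
# each diff line comes back indented by four spaces, e.g.
# (Token.Text.Whitespace, '    ') followed by (Token.Generic.Deleted, '-old line')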
Project: SwiftKitten    Author: johncsnyder    | project source | file source
def run(self, edit):
        """
        """
        view = self.view
        sel = view.sel()

        if len(sel) == 0:
            return

        a,b = sel[0]
        query = view.substr(view.word(a)) if a == b else view.substr(sel[0])

        if query == "":
            return

        # run docsetutil command
        docset = SwiftKittenEventListener.get_settings(view, "docset")
        cmd = self.get_docsetutil_cmd(view, docset, query)
        results = check_output(cmd, stderr=STDOUT)
        results = str(results, 'utf-8')

        if len(results) == 0:
            print("No documentation found.")
            return

        lines = results.splitlines()

        # split each line into two paths
        pairs = map(lambda line: line.strip().split("   "), lines)

        get_lang = lambda a: a.split('/')[0]
        get_path = lambda a,b: os.path.join(os.path.dirname(a),
            os.path.basename(b))

        #
        docs = {get_lang(a) : get_path(a,b) for a,b in pairs}

        # prefer Swift, Objective-C, C
        lang = sorted(docs.keys())[-1]

        # construct path to documentation token
        path = os.path.join(self.get_tokens_path(docset), docs[lang] + ".xml")

        # read documentation file
        with open(path, "rb") as f:
            xml = f.read()

        # convert xml to html
        html = str(self.convert_docs_to_html(xml), "utf-8")

        #
        # TO DO:
        # add on_navigate handler
        #

        # display documentation
        view.show_popup(html, max_width=400, max_height=600)