Python token module: tok_name usage examples

The following 24 code examples, extracted from open-source Python projects, illustrate how to use token.tok_name (a mapping from integer token type codes to their names; note that it is a dict, not a callable).
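Before the project excerpts, here is a minimal sketch of what token.tok_name provides (standard library only; the sample source string is made up for illustration):

import io
import token
import tokenize

# token.tok_name maps integer token type codes to human-readable names.
print(token.tok_name[token.NAME])    # 'NAME'
print(token.tok_name[token.STRING])  # 'STRING'

# Typical use: label tokenizer output.
source = "x = 1  # a comment\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(token.tok_name[tok.type], repr(tok.string))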

Project: heaviside    Author: jhuapl-boss
def __init__(self, code, value, start=(0,0), stop=(0,0), line=''):
        """
        Args:
            code (string|int): Token code. Ints are translated using token.tok_name.
            value (string): Token value
            start (tuple): Pair of values describing token start line, start position
            stop (tuple): Pair of values describing token stop line, stop position
            line (string): String containing the line the token was parsed from
        """
        try:
            self.code = token.tok_name[code]
        except (KeyError, TypeError):
            # code was not an int token code; keep it as-is
            self.code = code
        self.value = value
        self.start = start
        self.stop = stop
        self.line = line
Project: asttokens    Author: gristlabs
def token_repr(tok_type, string):
  """Returns a human-friendly representation of a token with the given type and string."""
  # repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
  return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u'))
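For example, assuming the function above is in scope and token is imported, a call might look like this (illustrative only):

import token
print(token_repr(token.NAME, 'foo'))  # NAME:'foo'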
Project: scipy-lecture-notes-zh-CN    Author: jayleicn
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    # Python 2 idioms below: file() and the bound .next method
    lines = file(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    tokens = tokenize.generate_tokens(lines.__iter__().next)
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
Project: dyfunconn    Author: makism
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = open(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring + 1 + start_row, erow + 1 + start_row
Project: polylearn    Author: scikit-learn-contrib
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
Project: Parallel-SGD    Author: angadgill
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
Project: python-driver    Author: bblfsh
def __init__(self, type_: int, value: str, start: Tuple[int, int],
            end: Tuple[int, int], rawvalue: str) -> None:
        self.type = type_
        self.name = token_module.tok_name[type_]
        self.value = value
        self.start = TokenPos(*start)
        self.end = TokenPos(*end)
        self.rawvalue = rawvalue
Project: tailbiter    Author: darius
def skim_token(t):
    print(T.tok_name[t.type], T.tok_name[t.exact_type], t.string)
    return  # note: this early return makes the variant below unreachable
    if T.tok_name[t.type] == T.tok_name[t.exact_type]:
        print(T.tok_name[t.type], t.string)
    else:
        print(T.tok_name[t.type], T.tok_name[t.exact_type], t.string)
Project: tailbiter    Author: darius
def print_token(t):
#    print(t.count)
#    print(t.index)
#    print()
    print('line', t.line)
    print('start', t.start)
    print('end', t.end)
    print('string', t.string)
    print('type', t.type, T.tok_name[t.type])
    print('exact_type', t.exact_type, T.tok_name[t.exact_type])
    print()
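Both tailbiter helpers contrast t.type with t.exact_type: for operator tokens the generic type is OP, while exact_type names the specific operator. A small illustrative check (standard library only; the expression is made up):

import io
import token
import tokenize

# Tokenize a tiny expression and inspect the '+' operator token.
toks = list(tokenize.generate_tokens(io.StringIO("a + b\n").readline))
plus = next(t for t in toks if t.string == "+")
print(token.tok_name[plus.type])        # 'OP'
print(token.tok_name[plus.exact_type])  # 'PLUS'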
Project: terra    Author: UW-Hydro
def extract_docstring(self):
        """ Extract a module-level docstring
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            lines.pop(0)
            start_row = 1

        docstring = ''
        first_par = ''
        tokens = tokenize.generate_tokens(lines.__iter__().__next__)
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
            docstring = eval(tok_content)  # evaluate the STRING token's source text to its value; ast.literal_eval would be safer
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break

        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])

        self.docstring = docstring
        self.short_desc = first_par
        self.end_line = erow + 1 + start_row
Project: scipy-lecture-notes-zh-CN    Author: jayleicn
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    # Python 2 idioms below: file() and the bound iter(lines).next method
    lines = file(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(iter(lines).next)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row
Project: oil    Author: oilshell
def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
Project: python2-tracer    Author: extremecoders-re
def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
Project: dyfunconn    Author: makism
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row
Project: web_ctp    Author: molebot
def dump_tokens(s):
    """Print out the tokens in s in a table format.

    The ENDMARKER is omitted.
    """
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
Project: web_ctp    Author: molebot
def assertExactTypeEqual(self, opstr, *optypes):
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        self.assertEqual(len(tokens), 2 + num_optypes)
        self.assertEqual(token.tok_name[tokens[0].exact_type],
                         token.tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                             token.tok_name[optypes[i]])
        self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                         token.tok_name[token.ENDMARKER])
Project: RKSV    Author: ztp-at
def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
Project: ouroboros    Author: pybee
def check_tokenize(self, s, expected):
        # Format the tokens in s in a table format.
        # The ENDMARKER is omitted.
        result = []
        f = BytesIO(s.encode('utf-8'))
        for type, token, start, end, line in tokenize(f.readline):
            if type == ENDMARKER:
                break
            type = tok_name[type]
            result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
                          locals())
        self.assertEqual(result,
                         ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                         expected.rstrip().splitlines())
Project: ouroboros    Author: pybee
def assertExactTypeEqual(self, opstr, *optypes):
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        self.assertEqual(len(tokens), 2 + num_optypes)
        self.assertEqual(token.tok_name[tokens[0].exact_type],
                         token.tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                             token.tok_name[optypes[i]])
        self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                         token.tok_name[token.ENDMARKER])
Project: kbe_server    Author: xiaohaoppy
def dump_tokens(s):
    """Print out the tokens in s in a table format.

    The ENDMARKER is omitted.
    """
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
Project: kbe_server    Author: xiaohaoppy
def assertExactTypeEqual(self, opstr, *optypes):
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        self.assertEqual(len(tokens), 2 + num_optypes)
        self.assertEqual(token.tok_name[tokens[0].exact_type],
                         token.tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                             token.tok_name[optypes[i]])
        self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                         token.tok_name[token.ENDMARKER])
Project: pisi    Author: examachine
def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
Project: polylearn    Author: scikit-learn-contrib
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row
Project: Parallel-SGD    Author: angadgill
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row