Python token module: ENDMARKER code examples

The following 10 code examples, extracted from open-source Python projects, illustrate how to use token.ENDMARKER.
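
Before the project snippets, a minimal self-contained sketch (not taken from any of the projects below) shows the basic behaviour: the standard tokenize module always yields token.ENDMARKER as the last token of a stream.

import io
import token
import tokenize

source = "x = 1\nprint(x)\n"

# generate_tokens() reads source through a readline-style callable and
# always finishes with an ENDMARKER token marking the end of the input.
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))

for tok in tokens:
    print(token.tok_name[tok.type], repr(tok.string))

assert tokens[-1].type == token.ENDMARKER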

Project: sc-controller    Author: kozec
def restart(self, string):
        """
        Restarts parsing with new string
        Returns self for chaining.
        """

        try:
            self.tokens = [
                ActionParser.Token(type, string)
                for (type, string, trash, trash, trash)
                in generate_tokens( iter([string]).next )
                if type != TokenType.ENDMARKER
            ]
        except TokenError:
            self.tokens = None
        self.index = 0
        return self
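
The snippet above is Python 2 code (note iter([string]).next). A rough Python 3 equivalent of the same pattern, dropping the trailing ENDMARKER from the token stream, might look like the sketch below; ActionParser.Token and TokenType are sc-controller's own names, so a plain namedtuple stands in for them here.

import tokenize
from collections import namedtuple

Token = namedtuple("Token", "type string")  # stand-in for ActionParser.Token

def tokenize_string(string):
    """Tokenize 'string', discarding the trailing ENDMARKER token; returns
    None if the input cannot be tokenized (e.g. an unterminated bracket)."""
    try:
        return [
            Token(tok_type, tok_string)
            for tok_type, tok_string, _, _, _
            in tokenize.generate_tokens(iter([string]).__next__)
            if tok_type != tokenize.ENDMARKER
        ]
    except tokenize.TokenError:
        return None
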
Project: hostapd-mana    Author: adde88
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts))
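
This file_input() method (repeated verbatim in several projects below; it originates in Python 2's compiler.transformer module) walks a concrete-syntax-tree tuple in which every child node is tagged with a symbol or token number, and skips the bookkeeping NEWLINE and ENDMARKER children. A small sketch against the old parser module (removed in Python 3.10; shown purely for illustration) makes it visible where that ENDMARKER child comes from.

import parser   # removed in Python 3.10; requires Python 2 or <= 3.9 to run
import symbol
import token

# parser.suite() builds a concrete syntax tree; totuple() turns it into
# nested number-tagged tuples of the kind file_input() above walks.
st = parser.suite("x = 1\n").totuple()
assert st[0] == symbol.file_input

for child in st[1:]:
    print(token.tok_name.get(child[0]) or symbol.sym_name.get(child[0]))

# The last child is always an ENDMARKER token node, which is why
# file_input() skips ENDMARKER (and bare NEWLINE) entries and converts
# only the real statement nodes.
assert st[-1][0] == token.ENDMARKER
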
Project: asttokens    Author: gristlabs
def _find_last_in_line(self, start_token):
    try:
      newline = self._code.find_token(start_token, token.NEWLINE)
    except IndexError:
      newline = self._code.find_token(start_token, token.ENDMARKER)
    return self._code.prev_token(newline)
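
Here find_token() looks for the NEWLINE that ends the logical line containing start_token; if none follows (start_token sits on the last line of the stream), the ENDMARKER token serves as the fallback boundary. A rough standalone analogue of the same idea, written directly against tokenize rather than asttokens' own helpers:

import io
import token
import tokenize

def last_token_in_line(tokens, index):
    """Return the last token of the logical line containing tokens[index]:
    the token just before the next NEWLINE or, failing that, before the
    ENDMARKER. (Unlike asttokens, this does not skip comments or NL tokens.)"""
    for i in range(index + 1, len(tokens)):
        if tokens[i].type in (token.NEWLINE, token.ENDMARKER):
            return tokens[i - 1]
    return tokens[-1]

source = "a = (1 +\n     2)\nb = 3"
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
# Starting from the token "a" (index 0), this returns the ")" that closes
# the first logical line, even though that line spans two physical lines.
print(last_token_in_line(tokens, 0).string)
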
Project: oil    Author: oilshell
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts))
Project: python2-tracer    Author: extremecoders-re
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts))
Project: sslstrip-hsts-openwrt    Author: adde88
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts))
Project: pefile.pypy    Author: cloudtracer
def file_input(self, nodelist):
        doc = self.get_docstring(nodelist, symbol.file_input)
        if doc is not None:
            i = 1
        else:
            i = 0
        stmts = []
        for node in nodelist[i:]:
            if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
                self.com_append_stmt(stmts, node)
        return Module(doc, Stmt(stmts))
Project: rensapy    Author: RensaProject
def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output)
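
In this helper (the same code appears verbatim in the next two projects) ENDMARKER is included in the set of statement-boundary tokens so that the final statement of the source is flushed even when nothing follows it. A much-reduced sketch of that boundary logic, using the same token set to split source text into top-level chunks (token strings are simply concatenated, so original spacing is not preserved):

import io
import token
import tokenize

BOUNDARIES = (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
              token.INDENT, token.ENDMARKER)

def split_statements(source):
    """Split 'source' into top-level chunks at the same boundary tokens
    used by _simulate_compile_singlemode() above; ENDMARKER guarantees
    that the last chunk is emitted."""
    chunks, current, parenlevel = [], [], 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        current.append(tok.string)
        # Track bracket nesting so multi-line expressions stay in one chunk.
        if tok.string in ('(', '[', '{'):
            parenlevel += 1
        elif tok.string in (')', ']', '}'):
            parenlevel -= 1
        if parenlevel == 0 and tok.type in BOUNDARIES:
            chunk = ''.join(current)
            if chunk:
                chunks.append(chunk)
            current = []
    return chunks

print(split_statements("x = 1\ny = (2 +\n     3)"))
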
Project: RePhraser    Author: MissLummie
def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output)
Project: Verideals    Author: Derrreks
def _simulate_compile_singlemode(self, s):
        # Calculate line offsets
        lines = [0, 0]
        pos = 0
        while 1:
            pos = s.find('\n', pos)+1
            if not pos: break
            lines.append(pos)
        lines.append(len(s))

        oldpos = 0
        parenlevel = 0
        deflevel = 0
        output = []
        stmt = []

        text = StringIO(s)
        tok_gen = tokenize.generate_tokens(text.readline)
        for toktype, tok, (srow,scol), (erow,ecol), line in tok_gen:
            newpos = lines[srow] + scol
            stmt.append(s[oldpos:newpos])
            if tok != '':
                stmt.append(tok)
            oldpos = newpos + len(tok)

            # Update the paren level.
            if tok in '([{':
                parenlevel += 1
            if tok in '}])':
                parenlevel -= 1

            if tok in ('def', 'class') and deflevel == 0:
                deflevel = 1
            if deflevel and toktype == token.INDENT:
                deflevel += 1
            if deflevel and toktype == token.DEDENT:
                deflevel -= 1

            # Are we starting a statement?
            if ((toktype in (token.NEWLINE, tokenize.NL, tokenize.COMMENT,
                             token.INDENT, token.ENDMARKER) or
                 tok==':') and parenlevel == 0):
                if deflevel == 0 and self._is_expr(stmt[1:-2]):
                    output += stmt[0]
                    output.append('__print__((')
                    output += stmt[1:-2]
                    output.append('))')
                    output += stmt[-2:]
                else:
                    output += stmt
                stmt = []
        return ''.join(output)