Python ply.lex module: LexToken() example source code

We have collected the following 15 code examples from open source Python projects to illustrate how ply.lex.LexToken() is used.
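
A pattern shared by nearly all of the examples below: LexToken defines no constructor, so synthetic tokens are created by instantiating the class and assigning the four attributes ply expects. A minimal sketch (the helper name is ours, not ply's):

import ply.lex as lex

def make_token(token_type, value, lineno=0, lexpos=0):
    # LexToken has no __init__; set the attributes ply relies on by hand.
    tok = lex.LexToken()
    tok.type = token_type    # must name an entry in the lexer's `tokens` tuple
    tok.value = value
    tok.lineno = lineno
    tok.lexpos = lexpos
    return tok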

Project: epater    Author: mgard
def p_datainst3op_error(p):
    """datainst3op : OPDATA3OP logmnemonic flagscondandspace REG error REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG
                   | OPDATA3OP logmnemonic flagscondandspace REG error COMMA REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error COMMA op2"""
    if len(p) == 9:
        # "Registers and/or constants used in an operation must be
        # separated by a comma"
        raise YaccError("Les registres et/ou constantes utilisés dans une opération doivent être séparés par une virgule")
    elif len(p) == 7:
        # "The {} instruction requires 3 arguments"
        raise YaccError("L'instruction {} requiert 3 arguments".format(p[1]))
    elif len(p) == 10:
        # The position of the `error` token tells which production matched;
        # message: "Register R{}{} does not exist"
        if isinstance(p[5], LexToken):
            raise YaccError("Le registre R{}{} n'existe pas".format(p[4], p[5].value))
        else:
            raise YaccError("Le registre R{}{} n'existe pas".format(p[6], p[7].value))
    elif len(p) == 11:
        # Unreachable with the productions above; a debugging leftover.
        raise YaccError("TEST")
Project: deb-python-lesscpy    Author: openstack
def token(self):
        """
        Token function. Contains 2 hacks:
            1.  Injects ';' into blocks where the last property
                leaves out the ;
            2.  Strips out whitespace from nonsignificant locations
                to ease parsing.
        """
        if self.next_:
            t = self.next_
            self.next_ = None
            return t
        while True:
            t = self.lexer.token()
            if not t:
                return t
            if t.type == 't_ws' and (
                self.pretok or (self.last
                                and self.last.type not in self.significant_ws)):
                continue
            self.pretok = False
            if t.type == 't_bclose' and self.last and self.last.type not in ['t_bopen', 't_bclose'] and self.last.type != 't_semicolon' \
                    and not (hasattr(t, 'lexer') and (t.lexer.lexstate == 'escapequotes' or t.lexer.lexstate == 'escapeapostrophe')):
                self.next_ = t
                tok = lex.LexToken()
                tok.type = 't_semicolon'
                tok.value = ';'
                tok.lineno = t.lineno
                tok.lexpos = t.lexpos
                self.last = tok
                self.lexer.in_property_decl = False
                return tok
            self.last = t
            break
        return t
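
ply.yacc obtains tokens by calling token() on whatever object it is given as a lexer, so a filtering wrapper like the one above can stand in for the raw lexer directly. A sketch of how it would be wired up, assuming the enclosing class is named LessLexer and also forwards input() to the wrapped ply lexer:

import ply.yacc as yacc

parser = yacc.yacc()
# yacc never inspects the lexer's type; it only calls token() on it,
# so the semicolon-injecting wrapper is invisible to the grammar.
tree = parser.parse(less_source, lexer=LessLexer())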
Project: isar    Author: ilbers
def on_token(self, token):
        # Tokens arrive as (value, type) pairs; rewrap them as LexToken
        # objects so that a ply parser can consume them.
        value, type = token

        self.lineno = 0
        t = lex.LexToken()
        t.value = value
        t.type = type
        t.lexer = self
        t.lexpos = 0
        t.lineno = 0

        self._tokens.append(t)
Project: Stove    Author: 10c8
def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno

    return tok


Project: deb-python-jsonpath-rw    Author: openstack
def token(self, value, ty=None):
        # Test helper: the token type defaults to the value itself, matching
        # ply's convention for punctuation tokens.
        t = LexToken()
        t.type = ty if ty is not None else value
        t.value = value
        t.lineno = -1
        t.lexpos = -1
        return t
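
Defaulting the type to the value matches ply's convention for punctuation tokens, whose type and value are the same string. A hypothetical use in a lexer test (tokenize and the token types here are assumed, not taken from the project):

# Hypothetical test: build expected tokens with the helper above and
# compare them field by field against the lexer's actual output.
expected = [self.token('$'), self.token('foo', 'ID')]
produced = list(tokenize('$.foo'))
assert [(t.type, t.value) for t in produced] == \
       [(t.type, t.value) for t in expected]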
Project: gitsome    Author: donnemartin
def _new_token(type, value, pos):
    # `pos` is a (lineno, lexpos) pair.
    o = LexToken()
    o.type = type
    o.value = value
    o.lineno, o.lexpos = pos
    return o
Project: pythonql    Author: pythonql
def t_eof(self, t):
    # ply calls t_eof again as long as it returns a token, so one DEDENT is
    # emitted per leftover indentation level until only the base level ("")
    # remains on the stack.
    if t.lexer.indent_stack != [""]:
      t.lexer.indent_stack.pop()
      dedent_tok = lex.LexToken()
      dedent_tok.type = 'DEDENT'
      dedent_tok.value = ''
      dedent_tok.lineno = t.lexer.lineno
      dedent_tok.lexpos = t.lexer.lexpos
      return dedent_tok
    return None
Project: epater    Author: mgard
def p_datainst2op_error(p):
    """datainst2op : OPDATA2OP logmnemonic flagscondandspace error COMMA op2
                   | OPDATA2OP logmnemonic flagscondandspace REG error op2
                   | OPDATA2OP logmnemonic flagscondandspace REG error COMMA op2"""

    if len(p) == 8:
        # "Register R{}{} does not exist"
        raise YaccError("Le registre R{}{} n'existe pas".format(p[4], p[5].value))
    elif isinstance(p[4], LexToken):
        # "The {} instruction requires a register as its first argument"
        raise YaccError("L'instruction {} requiert un registre comme premier argument".format(p[1]))
    else:
        # "Registers and/or constants used in an operation must be
        # separated by a comma"
        raise YaccError("Les registres et/ou constantes utilisés dans une opération doivent être séparés par une virgule")
Project: stone    Author: dropbox
def token(self):
        """
        Returns the next LexToken. Returns None when all tokens have been
        exhausted.
        """

        if self.tokens_queue:
            self.last_token = self.tokens_queue.pop(0)
        else:
            r = self.lex.token()
            if isinstance(r, MultiToken):
                self.tokens_queue.extend(r.tokens)
                self.last_token = self.tokens_queue.pop(0)
            else:
                if r is None and self.cur_indent > 0:
                    if self.last_token and self.last_token.type not in ('NEWLINE', 'LINE'):
                        newline_token = self._create_token('NEWLINE', '\n', self.lex.lineno,
                                                           self.lex.lexpos)
                        self.tokens_queue.append(newline_token)
                    dedent_count = self.cur_indent // 4
                    dedent_token = self._create_token('DEDENT', '\t', self.lex.lineno,
                                                      self.lex.lexpos)
                    self.tokens_queue.extend([dedent_token] * dedent_count)

                    self.cur_indent = 0
                    self.last_token = self.tokens_queue.pop(0)
                else:
                    self.last_token = r
        return self.last_token
Project: stone    Author: dropbox
def _create_token(self, token_type, value, lineno, lexpos):
        """
        Helper for creating ply.lex.LexToken objects. Unfortunately, LexToken
        does not have a constructor defined to make setting these values easy.
        """
        token = lex.LexToken()
        token.type = token_type
        token.value = value
        token.lineno = lineno
        token.lexpos = lexpos
        return token
Project: coal    Author: coal-lang
def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno

    return tok


Project: whisper-python    Author: pietype
def p_error(self, p):
        # TODO
        if p:
            self._errors.append(p)
            pass # self._parser.errok()
        else:
            # ply calls p_error with p=None at end of input; building a token
            # for the remaining text and returning it gives the parser a new
            # lookahead instead of silently stopping.
            from ply.lex import LexToken
            tok = LexToken()
            tok.value = self.lexer.lexdata[self.lexer.lexpos:]
            tok.lineno = self.lexer.lineno
            tok.type = 'error'
            tok.lexpos = self.lexer.lexpos
            self._parser.errok()
            return tok
Project: ZCC    Author: hlFu
def p_error(p):
    if not p:
        print("End of file.")
        return

    if p.type == 'EOF':
        if ZCClex.lexer.lexer.curlyBalance > 0:
            parser.errok()
            # LexToken() takes no constructor arguments; synthesize the
            # missing closing brace by setting its attributes directly.
            tok = lex.LexToken()
            tok.type = 'RCURLYBRACKET'
            tok.value = '}'
            tok.lineno = p.lexer.lineno
            tok.lexpos = p.lexer.lexpos
            return tok
        else:
            return

    print("Syntax error at %r, at line: %d, column: %d." % (
        p.value, p.lexer.lineno, ZCClex.find_column(p.lexer.lexdata, p)))
    if p.type == 'IDENTIFIER':
        print("Undefined Type " + p.value[1])

    if parser.errorCounter > 0:
        print("In panic mode\n")
        while True:
            tok = parser.token()
            if not tok or tok.type == 'SEMICOLON' or tok.type == 'RCURLYBRACKET':
                break
        parser.restart()
    else:
        parser.errorCounter += 1
    return p
Project: ZCC    Author: hlFu
def token(self):
        tok = self.lexer.token()
        if tok is None:
            # Emit a single synthetic EOF token the first time the real lexer
            # runs dry; self.end ensures the following call returns None.
            if self.end:
                self.end = False
            else:
                self.end = True
                tok = lex.LexToken()
                tok.type = self.eof
                tok.value = None
                tok.lexpos = self.lexer.lexpos
                tok.lineno = self.lexer.lineno
        # print ('custom', tok)
        return tok
Project: pythonql    Author: pythonql
def t_NEWLINE(self,t):
    r'\n|\r'
    t.lexer.lineno += len(t.value)
    t.lexer.newlinepos = t.lexer.lexpos
    pos = t.lexer.lexpos
    data = t.lexer.lexdata

    # Consume all the whitespace until we hit something non-white or a newline
    while True:
      if pos >= len(data):
        return t

      if data[pos] in ['\n', '\r'] or not re.match(r'\s', data[pos]):
        break

      pos += 1

    # If this is a blank line, a comment, or we're inside parentheses,
    # don't return a token

    if data[pos] in ['\n', '\r', '#'] or t.lexer.opened > 0:
      return None

    ws = data[t.lexer.lexpos:pos]

    # Check if we dropped back to an outer indentation level; if so,
    # emit one DEDENT token per level popped
    try:
      idx = t.lexer.indent_stack.index(ws)
      ndedents = len(t.lexer.indent_stack)-idx-1
      for i in range(ndedents):
        t.lexer.indent_stack.pop()
        dedent_tok = lex.LexToken()
        dedent_tok.type = 'DEDENT'
        dedent_tok.value = ''
        dedent_tok.lineno = t.lexer.lineno
        dedent_tok.lexpos = pos
        t.lexer.pushback_token(dedent_tok)

    # Otherwise, check if we have added an indentation level and create
    # an INDENT token, or just return the newline

    except ValueError:
      last_ident = t.lexer.indent_stack[-1] if t.lexer.indent_stack else ""
      if ws.startswith(last_ident):
        indent_tok = lex.LexToken()
        indent_tok.type = 'INDENT'
        indent_tok.value = ws
        indent_tok.lineno = t.lexer.lineno
        indent_tok.lexpos = pos
        t.lexer.pushback_token(indent_tok)
        t.lexer.indent_stack.append(ws)

      # The new indent doesn't extend the previous one: indentation error!
      else:
        raise Exception("Bad indent at line %d" % t.lexer.lineno)
    return t
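
t_NEWLINE calls t.lexer.pushback_token(), which stock ply does not provide; pythonql supplies it through its own lexer wrapper, not shown here. A minimal sketch of one way to provide it, with all names and structure assumed:

import ply.lex as lex

class IndentLexer(object):
    """Hypothetical wrapper supplying the pushback_token()/indent_stack
    machinery that t_NEWLINE above expects to find on t.lexer."""

    def __init__(self, module):
        self.lexer = lex.lex(module=module)
        # Inside token rules, t.lexer is the raw ply lexer, so the state
        # t_NEWLINE reads and the pushback hook must live on that object.
        self.lexer.indent_stack = [""]
        self.lexer.opened = 0
        self._queue = []
        self.lexer.pushback_token = self._queue.append

    def input(self, data):
        self.lexer.input(data)

    def token(self):
        # Drain synthesized INDENT/DEDENT tokens before asking ply.
        if self._queue:
            return self._queue.pop(0)
        return self.lexer.token()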
