Java 类org.antlr.v4.runtime.IntStream 实例源码

项目:protobuf-netbeans-plugin    文件:AntlrCharStream.java   
@Override
public int LA(int i) {
    if (i == 0) {
        // LA(0) is undefined by the CharStream contract; 0 is the
        // conventional "undefined" return value.
        return 0;
    }

    if (i < 0) {
        // Backward lookahead is not supported by this stream.
        throw new UnsupportedOperationException("Not implemented yet");
    }

    int symbol = 0;
    int consumed = 0; // number of read() calls actually performed

    // Read forward up to i characters, stopping early once EOF is returned.
    for (; consumed < i && symbol != IntStream.EOF; consumed++) {
        symbol = read();
    }

    // Rewind exactly what was read. The previous backup(i) could rewind
    // further than the read position when EOF was hit before i characters
    // were consumed, which violates LexerInput.backup's precondition.
    // NOTE(review): assumes the EOF-returning read() counts as one
    // backup unit in the underlying LexerInput — confirm against the
    // NetBeans Lexer API javadoc.
    backup(consumed);
    return symbol;
}
项目:kalang    文件:TokenUtilTest.java   
@Test
public void test(){
    // Exercises lazy token buffering of the CommonTokenStream produced by
    // the project's TokenStreamFactory for the input "class{    }".
    CommonTokenStream ts = TokenStreamFactory.createTokenStream("class{    }");
    // Before any consume() the stream has fetched nothing yet, so both
    // size() and getTokens() report an empty buffer — presumably because
    // CommonTokenStream fills lazily; TODO confirm against the ANTLR docs.
    int tokenSize = ts.size();
    assertEquals(0, tokenSize);
    List<Token> tokens = ts.getTokens();
    assertEquals(0, tokens.size());
    // Consume "class" and "{"; LT then looks ahead/behind from position 2.
    ts.consume();
    ts.consume();
    assertEquals("}", ts.LT(1).getText());
    assertEquals("{", ts.LT(-1).getText());
    assertEquals("class", ts.LT(-2).getText());
    //why is it 4?
    // NOTE(review): likely 4 because LT(1) forced one more token to be
    // fetched beyond the two consumed plus EOF bookkeeping — verify.
    assertEquals(4, ts.size());
    int consumeSize = 2;
    // Drain the remaining tokens until EOF is the lookahead.
    while(ts.LA(1)!=IntStream.EOF){
        ts.consume();
        consumeSize++;
    }
    tokens = ts.getTokens();
    // After draining, all 5 tokens (including EOF) are buffered.
    assertEquals(5, tokens.size());
    assertEquals(3, consumeSize);
}
项目:antsdb    文件:MysqlCharStream.java   
@Override
public int LA(int i) {
    if ( i==0 ) {
        return 0; // LA(0) is undefined by the CharStream contract
    }
    // Map the relative lookahead offset onto an absolute buffer index.
    // For i > 0 the target char sits at p + i - 1; for i < 0 the usual
    // "i++ then p + i - 1" adjustment collapses to p + i.
    int index = (i > 0) ? (p + i - 1) : (p + i);
    // Out of range on either side (before the first char, or past the
    // last buffered char) reads as EOF.
    if (index < 0 || index >= n) {
        return IntStream.EOF;
    }
    return this.buf.get(index);
}
项目:mdetect    文件:AntlrCaseInsensitiveFileStream.java   
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // LA(0) is undefined by the CharStream contract
    }
    // Translate the relative offset into an absolute index into the
    // lookahead buffer: p + i - 1 for forward lookahead, p + i for
    // backward (the classic i++ adjustment folded in).
    int index = (i > 0) ? (p + i - 1) : (p + i);
    // Anything before the first character or beyond the buffered length
    // is reported as EOF.
    if (index < 0 || index >= n) {
        return IntStream.EOF;
    }
    return lookaheadData[index];
}
项目:esper    文件:CaseInsensitiveInputStream.java   
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // undefined per the CharStream contract
    }
    // Absolute position of the requested character in the la buffer.
    // Forward lookahead: p + i - 1. Backward: p + i (equivalent to the
    // usual i++ shift followed by p + i - 1).
    int pos = (i > 0) ? (p + i - 1) : (p + i);
    // Either side of the valid range means EOF / no character there.
    if (pos < 0 || pos >= n) {
        return IntStream.EOF;
    }
    return la[pos];
}
项目:perspective-backend    文件:CaseInsensitiveInputStream.java   
@Override
public int LA(int i) {
    if (i == 0) {
        return 0; // undefined per the CharStream contract
    }
    // Compute the absolute index of the requested char in one step:
    // p + i - 1 when peeking forward, p + i when peeking backward
    // (the conventional i++ adjustment applied up front).
    int pos = (i > 0) ? (p + i - 1) : (p + i);
    // No character before the first or past the last buffered one.
    if (pos < 0 || pos >= n) {
        return IntStream.EOF;
    }
    return lookaheadData[pos];
}
项目:jetbrains    文件:ANTLRLexerAdaptor.java   
@Override
public void start(CharSequence buffer, int startOffset, int endOffset, int initialState) {
    this.buffer = buffer;
    this.endOffset = endOffset;

    // Wrap the editor buffer in an ANTLR CharStream and position it at
    // the offset the platform asked us to lex from.
    CharStream stream = new CharSequenceCharStream(buffer, endOffset, IntStream.UNKNOWN_SOURCE_NAME);
    stream.seek(startOffset);

    // Lexing from the top of the file with the default state starts
    // fresh; any other combination restores the encoded lexer state.
    ANTLRLexerState state = (startOffset == 0 && initialState == 0)
            ? getInitialState()
            : toLexerState(initialState);

    applyLexerState(stream, state);
    advance();
}
项目:Scratch-ApuC    文件:LexerATNSimulator.java   
/**
 * Resolves the end of a lexer DFA/ATN simulation: if an accept state was
 * previously recorded, commits that match and returns its predicted token
 * type; otherwise either returns {@code Token.EOF} (when EOF was the very
 * first symbol seen) or throws to signal no viable alternative.
 *
 * @param prevAccept simulation state captured at the last accept state
 *                   ({@code dfaState == null} when none was reached)
 * @param input      the character stream being lexed
 * @param reach      the ATN configurations reached when matching failed
 * @param t          the current input symbol (may be {@code IntStream.EOF})
 * @return the predicted token type, or {@code Token.EOF}
 * @throws LexerNoViableAltException when no accept state was seen and the
 *                                   failure is not a bare EOF at the start
 */
protected int failOrAccept(SimState prevAccept, CharStream input,
                           ATNConfigSet reach, int t)
{
    if (prevAccept.dfaState != null) {
        LexerActionExecutor lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor;
        // Roll the input back to the recorded accept position and run
        // any lexer actions attached to that state.
        accept(input, lexerActionExecutor, startIndex,
            prevAccept.index, prevAccept.line, prevAccept.charPos);
        return prevAccept.dfaState.prediction;
    }
    else {
        // if no accept and EOF is first char, return EOF
        if ( t==IntStream.EOF && input.index()==startIndex ) {
            return Token.EOF;
        }

        throw new LexerNoViableAltException(recog, input, startIndex, reach);
    }
}
项目:Scratch-ApuC    文件:LexerATNSimulator.java   
/**
 * Commits a lexer match: repositions the input just after the matched
 * token, restores the recorded line/column, and executes any lexer
 * actions associated with the accept state.
 *
 * @param input               the character stream being lexed
 * @param lexerActionExecutor actions to run for the accepted token, or
 *                            {@code null} when there are none
 * @param startIndex          index where the token started
 * @param index               index just past the last char of the token
 * @param line                line number at the accept position
 * @param charPos             char position in line at the accept position
 */
protected void accept(@NotNull CharStream input, LexerActionExecutor lexerActionExecutor,
                      int startIndex, int index, int line, int charPos)
{
    if ( debug ) {
        System.out.format(Locale.getDefault(), "ACTION %s\n", lexerActionExecutor);
    }

    // seek to after last char in token
    input.seek(index);
    this.line = line;
    this.charPositionInLine = charPos;
    // Advance past the current char unless we are already at EOF.
    if (input.LA(1) != IntStream.EOF) {
        consume(input);
    }

    if (lexerActionExecutor != null && recog != null) {
        lexerActionExecutor.execute(recog, input, startIndex);
    }
}
项目:intellij-plugin-v4    文件:ANTLRLexerAdaptor.java   
@Override
public void start(CharSequence buffer, int startOffset, int endOffset, int initialState) {
    this.buffer = buffer;
    this.endOffset = endOffset;

    // Expose the IDE buffer to ANTLR as a CharStream, positioned where
    // the platform wants lexing to resume.
    CharStream stream = new CharSequenceCharStream(buffer, endOffset, IntStream.UNKNOWN_SOURCE_NAME);
    stream.seek(startOffset);

    // startOffset 0 with state 0 means a clean scan from the beginning;
    // otherwise decode the previously saved lexer state.
    ANTLRLexerState state = (startOffset == 0 && initialState == 0)
            ? getInitialState()
            : toLexerState(initialState);

    applyLexerState(stream, state);
    advance();
}
项目:pddl-parser    文件:CaseInsensitiveInputStream.java   
@Override
//CHECKSTYLE:OFF
public int LA(int i) {
    //CHECKSTYLE:ON
    if (i == 0) {
        return 0; // LA(0) is undefined by the CharStream contract
    }
    // Resolve the relative offset to an absolute index in the lowercase
    // shadow buffer: p + i - 1 going forward, p + i going backward
    // (the standard i++ adjustment pre-applied).
    int index = (i > 0) ? (p + i - 1) : (p + i);
    // Reads before the first character or past the end yield EOF.
    if (index < 0 || index >= n) {
        return IntStream.EOF;
    }
    return lowerCaseData[index];
}
项目:rainbow    文件:CaseInsensitiveStream.java   
@Override
public int LA(int i)
{
    // Delegate the lookahead, then upper-case everything except the two
    // sentinel values: 0 (undefined) and EOF.
    int ch = stream.LA(i);
    if (ch == 0 || ch == IntStream.EOF) {
        return ch;
    }
    return Character.toUpperCase(ch);
}
项目:protobuf-netbeans-plugin    文件:AntlrCharStream.java   
/**
 * Reads the next character from the underlying {@code LexerInput}.
 *
 * @return the next character, or {@code IntStream.EOF} when the input
 *         has no more characters available.
 */
private int read() {
    int ch = input.read();
    // Translate the NetBeans EOF sentinel into ANTLR's EOF constant.
    return (ch == LexerInput.EOF) ? IntStream.EOF : ch;
}
项目:codebuff    文件:LexerATNFactory.java   
@Override
public Handle tokenRef(TerminalAST node) {
    // Anything other than EOF is an ordinary rule reference.
    if (!node.getText().equals("EOF")) {
        return _ruleRef(node);
    }
    // A reference to EOF in the lexer becomes an atom transition on -1.
    ATNState from = newState(node);
    ATNState to = newState(node);
    from.addTransition(new AtomTransition(to, IntStream.EOF));
    return new Handle(from, to);
}
项目:ksql    文件:CaseInsensitiveStream.java   
@Override
public int LA(int i) {
  // Look ahead on the wrapped stream; only the sentinel values 0
  // (undefined) and EOF are passed through unchanged.
  int ch = stream.LA(i);
  if (ch == 0 || ch == IntStream.EOF) {
    return ch;
  }
  return Character.toUpperCase(ch);
}
项目:presto    文件:CaseInsensitiveStream.java   
@Override
public int LA(int i)
{
    // Delegate to the wrapped stream and upper-case the result, except
    // for the sentinels 0 (undefined) and EOF which pass through as-is.
    int ch = stream.LA(i);
    boolean sentinel = (ch == 0) || (ch == IntStream.EOF);
    return sentinel ? ch : Character.toUpperCase(ch);
}
项目:hypertalk-java    文件:CaseInsensitiveInputStream.java   
@Override
public int LA(int i) {
    // Sentinels (0 = undefined, EOF) pass through untouched; any real
    // character is replaced by the pre-computed lowercase character at
    // the same absolute position.
    int ch = super.LA(i);
    return (ch == 0 || ch == IntStream.EOF) ? ch : lowercase[index() + i - 1];
}
项目:rainbow    文件:DelimiterLexer.java   
/**
 * Returns the next token from the input stream, first trying to match one
 * of the configured statement delimiters (emitted as
 * {@code SqlBaseParser.DELIMITER}) before falling back to normal ATN-based
 * lexing. The body mirrors {@code org.antlr.v4.runtime.Lexer.nextToken};
 * keep the statement order intact when editing.
 *
 * @return the next token, or the EOF token once input is exhausted
 * @throws IllegalStateException when no input stream has been set
 */
@Override
public Token nextToken()
{
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            // Reset per-token bookkeeping before attempting a match.
            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }

                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e);        // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }

                // Remember EOF so the next call emits the EOF token.
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
项目:presto    文件:DelimiterLexer.java   
/**
 * Returns the next token, attempting to match a configured delimiter
 * (reported as {@code SqlBaseParser.DELIMITER}) before delegating to the
 * normal lexer ATN interpreter. This is a copy of
 * {@code org.antlr.v4.runtime.Lexer.nextToken} with the delimiter check
 * inserted; its statement order is load-bearing.
 *
 * @return the next token, or the EOF token once input is exhausted
 * @throws IllegalStateException when no input stream has been set
 */
@Override
public Token nextToken()
{
    if (_input == null) {
        throw new IllegalStateException("nextToken requires a non-null input stream.");
    }

    // Mark start location in char stream so unbuffered streams are
    // guaranteed at least have text of current token
    int tokenStartMarker = _input.mark();
    try {
        outer:
        while (true) {
            if (_hitEOF) {
                emitEOF();
                return _token;
            }

            // Reset per-token state before each match attempt.
            _token = null;
            _channel = Token.DEFAULT_CHANNEL;
            _tokenStartCharIndex = _input.index();
            _tokenStartCharPositionInLine = getInterpreter().getCharPositionInLine();
            _tokenStartLine = getInterpreter().getLine();
            _text = null;
            do {
                _type = Token.INVALID_TYPE;
                int ttype = -1;

                // This entire method is copied from org.antlr.v4.runtime.Lexer, with the following bit
                // added to match the delimiters before we attempt to match the token
                boolean found = false;
                for (String terminator : delimiters) {
                    if (match(terminator)) {
                        ttype = SqlBaseParser.DELIMITER;
                        found = true;
                        break;
                    }
                }

                if (!found) {
                    try {
                        ttype = getInterpreter().match(_input, _mode);
                    }
                    catch (LexerNoViableAltException e) {
                        notifyListeners(e);        // report error
                        recover(e);
                        ttype = SKIP;
                    }
                }

                // Latch EOF so the next nextToken() call emits EOF.
                if (_input.LA(1) == IntStream.EOF) {
                    _hitEOF = true;
                }
                if (_type == Token.INVALID_TYPE) {
                    _type = ttype;
                }
                if (_type == SKIP) {
                    continue outer;
                }
            }
            while (_type == MORE);
            if (_token == null) {
                emit();
            }
            return _token;
        }
    }
    finally {
        // make sure we release marker after match or
        // unbuffered char stream will keep buffering
        _input.release(tokenStartMarker);
    }
}
项目:Scratch-ApuC    文件:LexerATNSimulator.java   
/**
 * Runs the lexer DFA/ATN simulation starting from DFA state {@code ds0},
 * consuming input until the error state is reached or EOF is captured at
 * an accept state, then delegates the final decision to
 * {@code failOrAccept}.
 *
 * @param input the character stream being lexed
 * @param ds0   the DFA start state whose closure seeds the simulation
 * @return the predicted token type (or {@code Token.EOF}) as resolved by
 *         {@code failOrAccept}
 */
protected int execATN(@NotNull CharStream input, @NotNull DFAState ds0) {
    //System.out.println("enter exec index "+input.index()+" from "+ds0.configs);
    if ( debug ) {
        System.out.format(Locale.getDefault(), "start state closure=%s\n", ds0.configs);
    }

    int t = input.LA(1);
    @NotNull
    DFAState s = ds0; // s is current/from DFA state

    while ( true ) { // while more work
        if ( debug ) {
            System.out.format(Locale.getDefault(), "execATN loop starting closure: %s\n", s.configs);
        }

        // As we move src->trg, src->trg, we keep track of the previous trg to
        // avoid looking up the DFA state again, which is expensive.
        // If the previous target was already part of the DFA, we might
        // be able to avoid doing a reach operation upon t. If s!=null,
        // it means that semantic predicates didn't prevent us from
        // creating a DFA state. Once we know s!=null, we check to see if
        // the DFA state has an edge already for t. If so, we can just reuse
        // it's configuration set; there's no point in re-computing it.
        // This is kind of like doing DFA simulation within the ATN
        // simulation because DFA simulation is really just a way to avoid
        // computing reach/closure sets. Technically, once we know that
        // we have a previously added DFA state, we could jump over to
        // the DFA simulator. But, that would mean popping back and forth
        // a lot and making things more complicated algorithmically.
        // This optimization makes a lot of sense for loops within DFA.
        // A character will take us back to an existing DFA state
        // that already has lots of edges out of it. e.g., .* in comments.
        DFAState target = getExistingTargetState(s, t);
        if (target == null) {
            target = computeTargetState(input, s, t);
        }

        if (target == ERROR) {
            break;
        }

        // Record the accept position; on EOF we must stop here because
        // there is nothing further to consume.
        if (target.isAcceptState) {
            captureSimState(prevAccept, input, target);
            if (t == IntStream.EOF) {
                break;
            }
        }

        if (t != IntStream.EOF) {
            consume(input);
            t = input.LA(1);
        }

        s = target; // flip; current DFA target becomes new src/from state
    }

    return failOrAccept(prevAccept, input, s.configs, t);
}
项目:stencil    文件:InvalidSignatureException.java   
/**
 * Creates an exception reporting an invalid signature, marking
 * {@code flagToken} as the offending token.
 *
 * @param message    human-readable description of the problem
 * @param recognizer the recognizer that detected the invalid signature
 * @param input      the input stream being recognized when the error arose
 * @param ctx        the parser rule context in which the error occurred
 * @param flagToken  the token to report as the offending position
 */
public InvalidSignatureException(String message, Recognizer<?, ?> recognizer, IntStream input, ParserRuleContext ctx, Token flagToken) {
  super(message, recognizer, input, ctx);
  setOffendingToken(flagToken);
}