Usage examples for the Java class org.antlr.v4.runtime.TokenSource

Project: elasticsearch_my    File: EnhancedPainlessLexer.java
@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
                Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
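The key move above is fabricating a token through the lexer's own TokenFactory so that it carries a valid (TokenSource, CharStream) pair. A minimal, self-contained sketch of the same pattern using only stock ANTLR 4 APIs; the helper class and method are illustrative, not part of the elasticsearch code:

import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.misc.Pair;

class SyntheticTokens {
    /** Fabricate a synthetic token that reports the position of an existing token. */
    static Token synthesize(Lexer lexer, int type, String text, Token at) {
        Pair<TokenSource, CharStream> source =
                new Pair<TokenSource, CharStream>(lexer, lexer.getInputStream());
        return lexer.getTokenFactory().create(source, type, text,
                Lexer.DEFAULT_TOKEN_CHANNEL,
                at.getStartIndex(), at.getStopIndex(),
                at.getLine(), at.getCharPositionInLine());
    }
}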
Project: rainbow / presto    File: StatementSplitter.java
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
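The loop buffers token text until it sees a DELIMITER token, then emits the accumulated text as a complete Statement; whatever remains at EOF becomes the partial statement. A hedged usage sketch, assuming the getCompleteStatements()/getPartialStatement() accessors and the Statement.statement() method of the upstream Presto CLI class:

StatementSplitter splitter = new StatementSplitter(
        "SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
for (StatementSplitter.Statement s : splitter.getCompleteStatements()) {
    System.out.println(s.statement());              // "SELECT 1", then "SELECT 2"
}
System.out.println(splitter.getPartialStatement()); // "SELECT"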
Project: rainbow / presto    File: StatementSplitter.java
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
Project: jetbrains    File: PSITokenSource.java
/** Create an ANTLR Token from the current token type of the builder,
 *  then advance the builder to the next token (which ultimately calls an
 *  ANTLR lexer).  The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType}, and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel,
 *  so whitespace and comments (typically hidden channel) will look like
 *  real tokens. JetBrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type = ideaTType != null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line or column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    return t;
}
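Whatever produces the tokens, hooking a custom TokenSource into a parser is the same two lines everywhere. A sketch; MyParser stands in for any generated parser class, and the PSITokenSource constructor shown is an assumption:

TokenSource source = new PSITokenSource(builder);
MyParser parser = new MyParser(new CommonTokenStream(source));
MyParser.FileContext tree = parser.file(); // hypothetical start rule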
Project: compiler    File: BoaErrorListener.java
public void error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    hasError = true;

    final String filename = tokens.getSourceName();

    System.err.print(filename.substring(filename.lastIndexOf(File.separator) + 1) + ": compilation failed: ");
    System.err.print("Encountered " + kind + " error ");
    if (offendingSymbol != null)
        System.err.print("\"" + offendingSymbol + "\" ");
    System.err.println("at line " + line + ", column " + charPositionInLine + ". " + msg);

    underlineError(tokens, (Token)offendingSymbol, line, charPositionInLine, length);

    if (e != null)
        for (final StackTraceElement st : e.getStackTrace())
            System.err.println("\tat " + st);
    else
        System.err.println("\tat unknown stack");
}
Project: compiler / xtext-ide    File: BoaErrorListener.java / FetchCompilerError.java
private void underlineError(final TokenSource tokens, final Token offendingToken, final int line, final int charPositionInLine, final int length) {
    final String input = tokens.getInputStream().toString() + "\n ";
    final String[] lines = input.split("\n");
    final String errorLine = lines[line - 1];
    System.err.println(errorLine.replaceAll("\t", "    "));

    int stop = Math.min(charPositionInLine, errorLine.length());
    for (int i = 0; i < stop; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("    ");
        else
            System.err.print(" ");

    int stop2 = Math.min(stop + length, errorLine.length());
    for (int i = stop; i < stop2; i++)
        if (errorLine.charAt(i) == '\t')
            System.err.print("^^^^");
        else
            System.err.print("^");

    System.err.println();
}
Project: mini-markdown    File: CharsAsTokens.java
public Token nextToken() {
    Token t;
    consumeUnknown();
    int c = input.LA(1);
    int i = input.index();
    if (c == CharStream.EOF) {
        t = getTokenFactory().create(Token.EOF, "<EOF>");
    }
    else {
        Integer ttypeI = charToTokenType.get(c);
        t = getTokenFactory().create(
                new Pair<TokenSource, CharStream>(this, input),
                ttypeI, String.valueOf((char) c), Token.DEFAULT_CHANNEL, i, i,
                line, charPosInLine);
    }
    consume();
    return t;
}
Project: goworks / antlrworks2    File: TaggerTokenSource.java
@Override
public Token nextToken() {
    if (previousTag != null && previousTag.getToken().getType() == Token.EOF) {
        return previousTag.getToken();
    }

    if (tagIterator.hasNext()) {
        previousTag = tagIterator.next().getTag();
    } else {
        TokenSource source = this;
        String text = null;
        int channel = Token.DEFAULT_CHANNEL;
        int start = snapshot.length();
        int stop = start - 1;
        int lineCount = snapshot.getLineCount();
        int lineLength = snapshot.findLineFromLineNumber(lineCount - 1).getLength();
        previousTag = new TokenTag<>(tokenFactory.create(getTokenFactorySourcePair(), Token.EOF, text, channel, start, stop, lineCount, lineLength));
    }

    line = -1;
    charPositionInLine = -1;
    return previousTag.getToken();
}
Project: goworks / antlrworks2    File: ParseTrees.java
public static Interval getSourceInterval(@NonNull ParserRuleContext context) {
    Parameters.notNull("context", context);
    int startIndex = context.start.getStartIndex();
    Token stopSymbol = getStopSymbol(context);
    if (stopSymbol == null) {
        return new Interval(startIndex, startIndex - 1);
    }

    int stopIndex;
    if (stopSymbol.getType() != Token.EOF) {
        stopIndex = stopSymbol.getStopIndex();
    } else {
        TokenSource tokenSource = context.getStart().getTokenSource();
        CharStream inputStream = tokenSource != null ? tokenSource.getInputStream() : null;
        if (inputStream != null) {
            stopIndex = inputStream.size() - 1;
        } else {
            stopIndex = context.start.getStartIndex() - 1;
        }
    }

    stopIndex = Math.max(stopIndex, startIndex - 1);
    return new Interval(startIndex, stopIndex);
}
Project: intellij-plugin-v4    File: ParsingUtils.java
public static Token nextRealToken(CommonTokenStream tokens, int i) {
    int n = tokens.size();
    i++; // search after current i token
    if ( i>=n || i<0 ) return null;
    Token t = tokens.get(i);
    while ( t.getChannel()==Token.HIDDEN_CHANNEL ) {
        if ( t.getType()==Token.EOF ) {
            TokenSource tokenSource = tokens.getTokenSource();
            if ( tokenSource==null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            TokenFactory<?> tokenFactory = tokenSource.getTokenFactory();
            if ( tokenFactory==null ) {
                return new CommonToken(Token.EOF, "EOF");
            }
            return tokenFactory.create(Token.EOF, "EOF");
        }
        i++;
        if ( i>=n ) return null; // just in case no EOF
        t = tokens.get(i);
    }
    return t;
}
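A hedged usage note: the method indexes into the stream's buffered token list, so the stream should be filled (or at least lexed past i) before calling it. For example:

CommonTokenStream tokens = new CommonTokenStream(lexer); // lexer is assumed
tokens.fill(); // buffer every token so tokens.get(i) is valid up to EOF
Token next = ParsingUtils.nextRealToken(tokens, 0); // first non-hidden token after index 0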
Project: intellij-plugin-v4    File: PsiTokenSource.java
@Override
public Token nextToken() {
    TokenIElementType ideaTType = (TokenIElementType) builder.getTokenType();
    int type;
    if (ideaTType == null) {
        type = Token.EOF;
    }
    else {
        type = ideaTType.getType();
    }

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line or column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = factory.create(source, type, text, channel, start, stop, line, charPositionInLine);
    builder.advanceLexer();
    return t;
}
Project: rainbow / presto    File: StatementSplitter.java
public static boolean isEmptyStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            return true;
        }
        if (token.getChannel() != Token.HIDDEN_CHANNEL) {
            return false;
        }
    }
}
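So a statement is "empty" exactly when every token before EOF sits on the hidden channel. Expected behaviour, assuming whitespace and comments are hidden-channel tokens in the SqlBase grammar:

assert StatementSplitter.isEmptyStatement("");              // only EOF
assert StatementSplitter.isEmptyStatement("   -- comment"); // hidden-channel tokens only
assert !StatementSplitter.isEmptyStatement("SELECT 1");     // real token on default channel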
Project: protobuf-netbeans-plugin    File: AntlrDocument.java
/**
 * @requires text != null && tokenSource != null
 * @effects Makes this be a new Document d with d.text = text and d.tokens
 *          set to the tokens produced by tokenSource
 */
protected AntlrDocument(String text, TokenSource tokenSource) {
    Assert.notNull(text);
    Assert.notNull(tokenSource);

    this.text = text;
    this.tokens = new LinkedList<>();

    initTokens(tokenSource);
}
Project: protobuf-netbeans-plugin    File: AntlrDocument.java
/**
 * @requires the tokens of this are not initialized yet && source != null
 * @modifies this
 * @effects Initializes the tokens of this with the given token source.
 */
private void initTokens(TokenSource source) {
    Assert.isTrue(tokens.isEmpty());

    Token token;

    do {
        token = source.nextToken();
        tokens.add(token);
    } while (token.getType() != Token.EOF);
}
Project: org.ops4j.ramler    File: AbstractGeneratorTest.java
private TypescriptParser buildParser(File source) throws IOException {
    CharStream inputCharStream = CharStreams.fromPath(source.toPath());
    TokenSource tokenSource = new TypescriptLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    TypescriptParser parser = new TypescriptParser(inputTokenStream);

    // make parser throw exception on first error
    parser.setErrorHandler(new BailErrorStrategy());

    // print detailed error messages to System.err
    parser.addErrorListener(new ConsoleErrorListener());

    return parser;
}
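With BailErrorStrategy installed, the first syntax error aborts parsing by throwing a ParseCancellationException (stock ANTLR 4 behaviour) rather than attempting recovery. A sketch of how a caller might use the returned parser; program() is a hypothetical start rule:

try {
    parser.program();
} catch (ParseCancellationException e) {
    // BailErrorStrategy wraps the original RecognitionException as the cause
    RecognitionException cause = (RecognitionException) e.getCause();
    System.err.println("parse failed at: " + cause.getOffendingToken().getText());
}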
Project: learnantlr    File: TestStringRecognition.java
private ProgramContext parseProgram(String program, TestErrorListener errorListener) throws IOException
{
    CharStream inputCharStream = new ANTLRInputStream(new StringReader(program));
    TokenSource tokenSource = new ShapePlacerLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    ShapePlacerParser parser = new ShapePlacerParser(inputTokenStream);
    parser.addErrorListener(errorListener);

    ProgramContext context = parser.program();
    return context;
}
Project: arithmetic    File: TestArithmeticParser.java
public static ProgramContext parseProgram(String program, ANTLRErrorListener errorListener) throws IOException
{
    CharStream inputCharStream = new ANTLRInputStream(new StringReader(program));
    TokenSource tokenSource = new ArithmeticLexer(inputCharStream);
    TokenStream inputTokenStream = new CommonTokenStream(tokenSource);
    ArithmeticParser parser = new ArithmeticParser(inputTokenStream);
    parser.addErrorListener(errorListener);
    ProgramContext context = parser.program();
    return context;
}
Project: xtext-ide    File: FetchCompilerError.java
public String[] error(final String kind, final TokenSource tokens, final Object offendingSymbol, final int line, final int charPositionInLine, final int length, final String msg, final Exception e) {
    try {
        //underlineError(tokens, (Token)offendingSymbol, line, charPositionInLine, length);
        error[0] = Integer.toString(line);
        error[1] = Integer.toString(charPositionInLine);
        error[2] = msg;
    } catch (Exception exception) {
        // ignore: return whatever error info was captured so far
    }

    return error;
}
Project: tosca    File: TSParser.java
/** 
 * Initialize token source 
 * @throws IOException 
 */
protected TokenSource newTokenSource(Reader reader, int line, int column) throws IOException
{
    Lexer lexer = newLexer(new ANTLRInputStream(reader));
    lexer.setLine(line);
    lexer.setCharPositionInLine(column);
    return lexer;
}
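Seeding the lexer's line and column this way makes token and error positions line up with an enclosing document when only a fragment of it is parsed; both setters are stock ANTLR 4 Lexer API. A hedged usage sketch, where fragment is an assumed String variable:

// Parse a fragment that begins at line 10, column 4 of the outer file:
TokenSource source = newTokenSource(new StringReader(fragment), 10, 4);
TokenStream input = new CommonTokenStream(source);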
Project: tosca    File: TSParser.java
/**
 * Setup input
 * @throws IOException 
 */
protected void setupInput(Reader reader, int line, int column) throws IOException
{
    TokenSource source = newTokenSource(reader, line, column);
    TokenStream input = new CommonTokenStream(source);

    initATN();
    setInputStream(input);

    setBuildParseTree(false);
}
Project: goworks / antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Project: goworks / antlrworks2    File: DocumentSnapshotToken.java
public DocumentSnapshotToken(Tuple2<? extends TokenSource, CharStream> source, int type, int channel, int start, int stop) {
    super(source, type, channel, start, stop);
    CharStream inputStream = source.getItem2();
    if (!(inputStream instanceof DocumentSnapshotCharStream)) {
        throw new IllegalArgumentException(String.format("Expected a %s backed by a %s.", TokenSource.class.getSimpleName(), DocumentSnapshotCharStream.class.getSimpleName()));
    }

    DocumentSnapshotCharStream charStream = (DocumentSnapshotCharStream)inputStream;
    snapshot = charStream.getSnapshot();
}
Project: goworks / antlrworks2    File: DocumentSnapshotTokenFactory.java
@Override
public DocumentSnapshotToken create(Tuple2<? extends TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line, int charPositionInLine) {
    if (effectiveSource != null) {
        source = effectiveSource;
    }

    DocumentSnapshotToken t = new DocumentSnapshotToken(source, type, channel, start, stop);
    t.setLine(line);
    t.setCharPositionInLine(charPositionInLine);
    if ( text!=null ) {
        t.setText(text);
    }
    return t;
}
Project: goworks / antlrworks2    File: TaggerTokenSource.java
@NonNull
protected Tuple2<? extends TokenSource, CharStream> getTokenFactorySourcePair() {
    if (tokenFactorySourcePair == null) {
        tokenFactorySourcePair = Tuple.create(this, getInputStream());
    }

    return tokenFactorySourcePair;
}
Project: JavaSharp    File: XmlEmittingVisitor.java
@Override
public Object visitTerminal(TerminalNode node) {
    Token symbol = node.getSymbol();
    int tokenIndex = symbol.getTokenIndex();
    emitComments(tokenIndex);
    TokenSource tokenSource = symbol.getTokenSource();
    emitToken("Symbol", symbol);
    return null;
}
Project: intellij-plugin-v4    File: ParsingUtils.java
public static Token getSkippedTokenUnderCursor(CommonTokenStream tokens, int offset) {
    if ( offset<0 || offset >= tokens.getTokenSource().getInputStream().size() ) return null;
    Token prevToken = null;
    Token tokenUnderCursor = null;
    for (Token t : tokens.getTokens()) {
        int begin = t.getStartIndex();
        int end = t.getStopIndex();
        if ( (prevToken==null || offset > prevToken.getStopIndex()) && offset < begin ) {
            // found in between
            TokenSource tokenSource = tokens.getTokenSource();
            CharStream inputStream = null;
            if ( tokenSource!=null ) {
                inputStream = tokenSource.getInputStream();
            }
            tokenUnderCursor = new org.antlr.v4.runtime.CommonToken(
                new Pair<TokenSource, CharStream>(tokenSource, inputStream),
                Token.INVALID_TYPE,
                -1,
                prevToken!=null ? prevToken.getStopIndex()+1 : 0,
                begin-1
            );
            break;
        }
        if ( offset >= begin && offset <= end ) {
            tokenUnderCursor = t;
            break;
        }
        prevToken = t;
    }
    return tokenUnderCursor;
}
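A hedged usage note: when the offset falls between two tokens, i.e. on text the lexer skipped, the method fabricates a CommonToken of INVALID_TYPE whose start/stop indexes delimit the gap. The tokens and offset variables below are assumed:

Token hit = ParsingUtils.getSkippedTokenUnderCursor(tokens, offset);
if (hit != null && hit.getType() == Token.INVALID_TYPE) {
    System.out.printf("cursor in skipped text %d..%d%n",
            hit.getStartIndex(), hit.getStopIndex());
}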
Project: elasticsearch_my    File: StashingTokenFactory.java
@Override
public T create(Pair<TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line,
        int charPositionInLine) {
    return maybeStash(delegate.create(source, type, text, channel, start, stop, line, charPositionInLine));
}
Project: rest-modeling-framework    File: RamlTokenFactory.java
@Override
public RamlToken create(Pair<TokenSource, CharStream> source, int type, String text, int channel, int start, int stop, int line, int charPositionInLine) {
    RamlToken ramlToken = new RamlToken(source, type, channel, start, stop);
    ramlToken.setText(text);
    return ramlToken;
}
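A factory like this takes effect only once it is installed on the lexer, before any tokens are pulled; setTokenFactory is stock ANTLR 4 API, while RamlLexer is a placeholder name here:

RamlLexer lexer = new RamlLexer(CharStreams.fromString(input)); // input: assumed String
lexer.setTokenFactory(new RamlTokenFactory());
CommonTokenStream tokens = new CommonTokenStream(lexer); // now yields RamlTokens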
Project: rainbow    File: StatementSplitter.java
private static TokenSource getLexer(String sql, Set<String> terminators)
{
    requireNonNull(sql, "sql is null");
    CharStream stream = new CaseInsensitiveStream(new ANTLRInputStream(sql));
    return new DelimiterLexer(stream, terminators);
}
Project: assertjGen-gradle-plugin    File: Hoge.java
public static void foo(TokenSource tokenSource) {
    System.out.println(tokenSource);
}