Java class org.antlr.v4.runtime.Token usage examples (source code)

Project: rapidminer    File: CapitulatingErrorStrategy.java
@Override
protected void reportNoViableAlternative(Parser recognizer, NoViableAltException e) {
    // change error message from default implementation
    TokenStream tokens = recognizer.getInputStream();
    String input;
    if (tokens != null) {
        if (e.getStartToken().getType() == Token.EOF) {
            input = "the end";
        } else {
            input = escapeWSAndQuote(tokens.getText(e.getStartToken(), e.getOffendingToken()));
        }
    } else {
        input = escapeWSAndQuote("<unknown input>");
    }
    String msg = "inadmissible input at " + input;
    recognizer.notifyErrorListeners(e.getOffendingToken(), msg, e);
}
Project: rapidminer    File: CapitulatingErrorStrategy.java
@Override
protected String getTokenErrorDisplay(Token t) {
    // override the standard behavior to use "the end" instead of <EOF>
    if (t == null) {
        return "<no token>";
    }
    String s = getSymbolText(t);
    if (s == null) {
        if (getSymbolType(t) == Token.EOF) {
            s = "the end";
        } else {
            s = escapeWSAndQuote("<" + getSymbolType(t) + ">");
        }
    } else {
        s = s.replace("<EOF>", "the end");
    }
    return s;
}
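A minimal wiring sketch (hypothetical: the expression lexer/parser class names below are placeholders, not part of the rapidminer sources, and a default constructor is assumed) showing how such a custom error strategy is typically installed on an ANTLR parser:

CharStream input = CharStreams.fromString("1 + ");        // deliberately incomplete input
ExpressionLexer lexer = new ExpressionLexer(input);        // placeholder generated lexer
CommonTokenStream tokens = new CommonTokenStream(lexer);
ExpressionParser parser = new ExpressionParser(tokens);    // placeholder generated parser
parser.setErrorHandler(new CapitulatingErrorStrategy());   // errors now read "inadmissible input at ..." / "the end"
parser.expression();                                       // placeholder start rule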
Project: elasticsearch_my    File: EnhancedPainlessLexer.java
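// Returns the next token, inserting a synthetic SEMICOLON before a closing '}' when the
// statement in front of it was left unterminated (see insertSemicolon further below).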
@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input), PainlessLexer.SEMICOLON, ";",
                Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
Project: elasticsearch_my    File: EnhancedPainlessLexer.java
@Override
protected boolean slashIsRegex() {
    Token lastToken = getPreviousToken();
    if (lastToken == null) {
        return true;
    }
    switch (lastToken.getType()) {
    case PainlessLexer.RBRACE:
    case PainlessLexer.RP:
    case PainlessLexer.OCTAL:
    case PainlessLexer.HEX:
    case PainlessLexer.INTEGER:
    case PainlessLexer.DECIMAL:
    case PainlessLexer.ID:
    case PainlessLexer.DOTINTEGER:
    case PainlessLexer.DOTID:
        return false;
    default:
        return true;
    }
}
Project: elasticsearch_my    File: ParserErrorStrategy.java
@Override
public void recover(final Parser recognizer, final RecognitionException re) {
    final Token token = re.getOffendingToken();
    String message;

    if (token == null) {
        message = "no parse token found.";
    } else if (re instanceof InputMismatchException) {
        message = "unexpected token [" + getTokenErrorDisplay(token) + "]" +
                " was expecting one of [" + re.getExpectedTokens().toString(recognizer.getVocabulary()) + "].";
    } else if (re instanceof NoViableAltException) {
        if (token.getType() == PainlessParser.EOF) {
            message = "unexpected end of script.";
        } else {
            message = "invalid sequence of tokens near [" + getTokenErrorDisplay(token) + "].";
        }
    } else {
        message =  "unexpected token near [" + getTokenErrorDisplay(token) + "].";
    }

    Location location = new Location(sourceName, token == null ? -1 : token.getStartIndex());
    throw location.createError(new IllegalArgumentException(message, re));
}
Project: oscm-app    File: FWPolicyErrorStrategy.java
/**
 * Make sure we don't attempt to recover inline; if the parser successfully
 * recovers, it won't throw an exception.
 */
@Override
public Token recoverInline(Parser recognizer) throws RecognitionException {
    InputMismatchException e = new InputMismatchException(recognizer);

    String policies = recognizer.getInputStream().getText();
    StringTokenizer tk = new StringTokenizer(policies, ";");
    String policy = "";
    int idx = 0;
    while (tk.hasMoreElements()) {
        policy = (String) tk.nextElement();
        idx += policy.length();
        if (idx >= e.getOffendingToken().getStartIndex()) {
            break;
        }
    }

    String message = Messages.get(Messages.DEFAULT_LOCALE,
            "error_invalid_firewallconfig", new Object[] {
                    e.getOffendingToken().getText(), policy });
    throw new RuntimeException(message);
}
Project: ts-swift-transpiler    File: SwiftSupport.java
/**
 "If an operator has whitespace on the right side only, it is treated as a
 postfix unary operator. As an example, the ++ operator in a++ b is treated
 as a postfix unary operator."
 "If an operator has no whitespace on the left but is followed immediately
 by a dot (.), it is treated as a postfix unary operator. As an example,
 the ++ operator in a++.b is treated as a postfix unary operator (a++ .b
 rather than a ++ .b)."
 */
public static boolean isPostfixOp(TokenStream tokens) {
    int stop = getLastOpTokenIndex(tokens);
    if ( stop==-1 ) return false;

    int start = tokens.index();
    Token prevToken = tokens.get(start-1); // includes hidden-channel tokens
    Token nextToken = tokens.get(stop+1);
    boolean prevIsWS = isLeftOperatorWS(prevToken);
    boolean nextIsWS = isRightOperatorWS(nextToken);
    boolean result =
        !prevIsWS && nextIsWS ||
        !prevIsWS && nextToken.getType()==SwiftParser.DOT;
    String text = tokens.getText(Interval.of(start, stop));
    //System.out.println("isPostfixOp: '"+prevToken+"','"+text+"','"+nextToken+"' is "+result);
    return result;
}
Project: rest-modeling-framework    File: AbstractConstructor.java
@Override
public Object visitTypedElementTuple(RAMLParser.TypedElementTupleContext typedeElementTuple) {
    final Token type = typedeElementTuple.type;
    final String name = typedeElementTuple.name.getText();

    final EObject propertyType = Strings.isNullOrEmpty(type.getText()) ?
            scope.getEObjectByName(BuiltinType.STRING.getName()) :
            typeExpressionConstructor.parse(type.getText(), scope);
    final boolean isRequired = !name.endsWith("?");
    scope.setValue(TYPED_ELEMENT__REQUIRED, isRequired, typedeElementTuple.getStart());
    final String parsedName = isRequired ? name : name.substring(0, name.length() - 1);

    scope.setValue(IDENTIFIABLE_ELEMENT__NAME, parsedName, typedeElementTuple.getStart());
    scope.setValue(TYPED_ELEMENT__TYPE, propertyType, typedeElementTuple.getStart());

    return scope.eObject();
}
Project: rest-modeling-framework    File: TypeDeclarationResolver.java
private EObject resolveType(final ParserRuleContext ruleContext, final EObject superType) {
    final EObject resolvedType;

    if (superType.eIsProxy()) {
        resolvedType = null;
    } else {
        resolvedType = create(superType.eClass(), ruleContext);
        EcoreUtil.replace(unresolved, resolvedType);

        final Token nameToken = ruleContext.getStart();
        final String name = nameToken.getText();
        final Scope typeScope = scope.with(resolvedType, TYPE_CONTAINER__TYPES);

        typeScope.with(IDENTIFIABLE_ELEMENT__NAME)
                .setValue(name, nameToken);

        typeScope.with(unresolved.eClass().getEStructuralFeature("type"))
                .setValue(superType, nameToken);
    }

    return resolvedType;
}
Project: oscm    File: FWPolicyErrorStrategy.java
/**
 * Make sure we don't attempt to recover inline; if the parser successfully
 * recovers, it won't throw an exception.
 */
@Override
public Token recoverInline(Parser recognizer) throws RecognitionException {
    InputMismatchException e = new InputMismatchException(recognizer);

    String policies = recognizer.getInputStream().getText();
    StringTokenizer tk = new StringTokenizer(policies, ";");
    String policy = "";
    int idx = 0;
    while (tk.hasMoreElements()) {
        policy = (String) tk.nextElement();
        idx += policy.length();
        if (idx >= e.getOffendingToken().getStartIndex()) {
            break;
        }
    }

    String message = Messages.get(Messages.DEFAULT_LOCALE,
            "error_invalid_firewallconfig", new Object[] {
                    e.getOffendingToken().getText(), policy });
    throw new RuntimeException(message);
}
Project: beaker-notebook-archive    File: GrammarPredicates.java
public static boolean isClassName(TokenStream _input) {
  try {
    int i=1;
    Token token = _input.LT(i);
    while (token!=null && i < _input.size() && _input.LT(i+1).getType() == GroovyParser.DOT) {
      i = i + 2;
      token = _input.LT(i);
    }
    if(token==null)
      return false;
    // TODO here
    return Character.isUpperCase(Character.codePointAt(token.getText(), 0));
  } catch(Exception e) {
    e.printStackTrace();
  }

  return false;
}
Project: rainbow    File: StatementSplitter.java
public StatementSplitter(String sql, Set<String> delimiters)
{
    TokenSource tokens = getLexer(sql, delimiters);
    ImmutableList.Builder<Statement> list = ImmutableList.builder();
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseParser.DELIMITER) {
            String statement = sb.toString().trim();
            if (!statement.isEmpty()) {
                list.add(new Statement(statement, token.getText()));
            }
            sb = new StringBuilder();
        }
        else {
            sb.append(token.getText());
        }
    }
    this.completeStatements = list.build();
    this.partialStatement = sb.toString().trim();
}
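A hedged usage sketch, assuming the splitter exposes getCompleteStatements() and getPartialStatement() accessors for the two fields assigned above (as the upstream Presto version of this class does):

StatementSplitter splitter =
        new StatementSplitter("SELECT 1; SELECT 2; SELECT", ImmutableSet.of(";"));
System.out.println(splitter.getCompleteStatements()); // the two delimiter-terminated statements: "SELECT 1" and "SELECT 2"
System.out.println(splitter.getPartialStatement());   // "SELECT" has no delimiter, so it stays partial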
Project: rainbow    File: StatementSplitter.java
public static String squeezeStatement(String sql)
{
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder sb = new StringBuilder();
    while (true) {
        Token token = tokens.nextToken();
        if (token.getType() == Token.EOF) {
            break;
        }
        if (token.getType() == SqlBaseLexer.WS) {
            sb.append(' ');
        }
        else {
            sb.append(token.getText());
        }
    }
    return sb.toString().trim();
}
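For example (a sketch; the behavior follows directly from the loop above, which replaces each whitespace token with a single space and trims the result):

String squeezed = StatementSplitter.squeezeStatement("SELECT *\n    FROM t\n");
// squeezed is "SELECT * FROM t"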
Project: rainbow    File: SqlParser.java
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context)
{
    // we can't modify the tree during rule enter/exit event handling unless we're dealing with a terminal.
    // Otherwise, ANTLR gets confused and fires spurious notifications.
    if (!(context.getChild(0) instanceof TerminalNode)) {
        int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex();
        throw new AssertionError("nonReserved can only contain tokens. Found nested rule: " + ruleNames.get(rule));
    }

    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    context.getParent().addChild(new CommonToken(
            new Pair<>(token.getTokenSource(), token.getInputStream()),
            SqlBaseLexer.IDENTIFIER,
            token.getChannel(),
            token.getStartIndex(),
            token.getStopIndex()));
}
Project: rainbow    File: AstBuilder.java
private static ArithmeticBinaryExpression.Type getArithmeticBinaryOperator(Token operator)
{
    switch (operator.getType()) {
        case SqlBaseLexer.PLUS:
            return ArithmeticBinaryExpression.Type.ADD;
        case SqlBaseLexer.MINUS:
            return ArithmeticBinaryExpression.Type.SUBTRACT;
        case SqlBaseLexer.ASTERISK:
            return ArithmeticBinaryExpression.Type.MULTIPLY;
        case SqlBaseLexer.SLASH:
            return ArithmeticBinaryExpression.Type.DIVIDE;
        case SqlBaseLexer.PERCENT:
            return ArithmeticBinaryExpression.Type.MODULUS;
    }

    throw new UnsupportedOperationException("Unsupported operator: " + operator.getText());
}
Project: rainbow    File: AstBuilder.java
private static ComparisonExpressionType getComparisonOperator(Token symbol)
{
    switch (symbol.getType()) {
        case SqlBaseLexer.EQ:
            return ComparisonExpressionType.EQUAL;
        case SqlBaseLexer.NEQ:
            return ComparisonExpressionType.NOT_EQUAL;
        case SqlBaseLexer.LT:
            return ComparisonExpressionType.LESS_THAN;
        case SqlBaseLexer.LTE:
            return ComparisonExpressionType.LESS_THAN_OR_EQUAL;
        case SqlBaseLexer.GT:
            return ComparisonExpressionType.GREATER_THAN;
        case SqlBaseLexer.GTE:
            return ComparisonExpressionType.GREATER_THAN_OR_EQUAL;
    }

    throw new IllegalArgumentException("Unsupported operator: " + symbol.getText());
}
Project: rainbow    File: AstBuilder.java
private static CurrentTime.Type getDateTimeFunctionType(Token token)
{
    switch (token.getType()) {
        case SqlBaseLexer.CURRENT_DATE:
            return CurrentTime.Type.DATE;
        case SqlBaseLexer.CURRENT_TIME:
            return CurrentTime.Type.TIME;
        case SqlBaseLexer.CURRENT_TIMESTAMP:
            return CurrentTime.Type.TIMESTAMP;
        case SqlBaseLexer.LOCALTIME:
            return CurrentTime.Type.LOCALTIME;
        case SqlBaseLexer.LOCALTIMESTAMP:
            return CurrentTime.Type.LOCALTIMESTAMP;
    }

    throw new IllegalArgumentException("Unsupported special function: " + token.getText());
}
Project: rainbow    File: AstBuilder.java
private static IntervalLiteral.IntervalField getIntervalFieldType(Token token)
{
    switch (token.getType()) {
        case SqlBaseLexer.YEAR:
            return IntervalLiteral.IntervalField.YEAR;
        case SqlBaseLexer.MONTH:
            return IntervalLiteral.IntervalField.MONTH;
        case SqlBaseLexer.DAY:
            return IntervalLiteral.IntervalField.DAY;
        case SqlBaseLexer.HOUR:
            return IntervalLiteral.IntervalField.HOUR;
        case SqlBaseLexer.MINUTE:
            return IntervalLiteral.IntervalField.MINUTE;
        case SqlBaseLexer.SECOND:
            return IntervalLiteral.IntervalField.SECOND;
    }

    throw new IllegalArgumentException("Unsupported interval field: " + token.getText());
}
Project: gitplex-mit    File: ANTLRAssistBehavior.java
@Override
protected int getAnchor(String content) {
    List<Token> tokens = codeAssist.getGrammar().lex(content);
    if (tokens.isEmpty()) {
        return 0;
    } else {
        Token lastToken = tokens.get(tokens.size()-1);
        String contentAfterLastToken = content.substring(lastToken.getStopIndex()+1);
        if (contentAfterLastToken.length() > 0) {
            contentAfterLastToken = StringUtils.trimStart(contentAfterLastToken);
            return content.length() - contentAfterLastToken.length();
        } else {
            return lastToken.getStartIndex();
        }
    }
}
Project: OperatieBRP    File: ParserErrorListener.java
@Override
public void syntaxError(final org.antlr.v4.runtime.Recognizer<?, ?> recognizer,
                        final java.lang.Object offendingSymbol,
                        final int line, final int charPositionInLine, final java.lang.String msg,
                        final org.antlr.v4.runtime.RecognitionException e) {
    final Token antlrToken = (Token) offendingSymbol;

    if (offendingSymbol == null) {
        // in the case of lexer errors, e.g. "token recognition error at: '*'" for the expression **bla**
        throw new ExpressieParseException(msg);
    } else {
        throw new ExpressieParseException(
                String.format("%s \"%s\" op positie: %d", ParserFoutCode.SYNTAX_ERROR, msg, antlrToken.getStartIndex())
        );
    }
}
Project: Tarski    File: Utilities.java
/**
 * Cloning expression to create new same expression.
 */
public static ExprContext cloneExprContext(final ExprContext expr) {

  final ExprContext clone = createContextType(expr);

  clone.copyFrom(expr);

  for (final ParseTree child : expr.children) {
    if (child instanceof TerminalNode) {
      clone.addChild(new TerminalNodeImpl(((TerminalNode) child).getSymbol()));
    } else if (child instanceof ExprContext) {
      final ExprContext cloneChild = cloneExprContext((ExprContext) child);
      clone.addChild(cloneChild);
      setLeftRight(clone, cloneChild);
    } else if (child instanceof Token) {
      clone.addChild(new CommonToken((Token) child));
    }
  }
  return clone;
}
Project: sonar-tsql-plugin    File: Antlr4Utils.java
public static void print(final ParseTree node, final int level, CommonTokenStream stream) {
    final Interval sourceInterval = node.getSourceInterval();

    final Token firstToken = stream.get(sourceInterval.a);

    int line = firstToken.getLine();
    int charStart = firstToken.getCharPositionInLine();

    int endLine = line;
    int endChar = charStart + firstToken.getText().length();

    String data = "@(" + line + ":" + charStart + "," + endLine + ":" + endChar + ") with text: "
            + firstToken.getText();
    final int tmp = level + 1;
    final StringBuilder sb = new StringBuilder();
    sb.append(StringUtils.repeat("\t", level));
    sb.append(node.getClass().getSimpleName() + ": " + data + " :" + node.getText());
    System.out.println(sb.toString());
    final int n = node.getChildCount();
    for (int i = 0; i < n; i++) {

        final ParseTree c = node.getChild(i);
        print(c, tmp, stream);

    }
}
Project: kalang    File: AstBuilder.java
@Override
public AstNode visitPostIfStmt(KalangParser.PostIfStmtContext ctx) {
    ExprNode leftExpr = visitExpression(ctx.expression(0));
    if (!(leftExpr instanceof AssignExpr)) {
        diagnosisReporter.report(Diagnosis.Kind.ERROR, "AssignExpr required", ctx);
    }
    AssignExpr assignExpr = (AssignExpr) leftExpr;
    AssignableExpr to = assignExpr.getTo();
    ExprNode from = assignExpr.getFrom();
    ExprNode cond = visitExpression(ctx.expression(1));
    Token op = ctx.op;
    if (op != null) {
        String opStr = op.getText();
        BinaryExpr be = createBinaryExpr(to, cond, opStr);
        cond = be;
    }
    AssignExpr as = new AssignExpr(to, from);
    IfStmt is = new IfStmt(cond);
    is.getTrueBody().statements.add(new ExprStmt(as));
    mapAst(is,ctx);
    return is;
}
Project: codebuff    File: Trainer.java
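// Classifies the whitespace to inject before token i: a newline category when one or more
// newlines precede the token, otherwise a column-gap category (or CAT_NO_WS if neither).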
public static int getInjectWSCategory(CodeBuffTokenStream tokens, int i) {
    int precedingNL = getPrecedingNL(tokens, i); // how many lines to inject

    Token curToken = tokens.get(i);
    Token prevToken = tokens.getPreviousRealToken(i);

    int ws = 0;
    if ( precedingNL==0 ) {
        ws = curToken.getCharPositionInLine() -
            (prevToken.getCharPositionInLine()+prevToken.getText().length());
    }

    int injectNL_WS = CAT_NO_WS;
    if ( precedingNL>0 ) {
        injectNL_WS = nlcat(precedingNL);
    }
    else if ( ws>0 ) {
        injectNL_WS = wscat(ws);
    }

    return injectNL_WS;
}
Project: sonar-tsql-plugin    File: AntrlFileTest.java
@Test
public void compareWithAntrl() {
    String s = "select " + "*" + "from dbo.test";
    AntrlResult result = Antlr4Utils.getFull(s);
    SourceLinesProvider p = new SourceLinesProvider();
    SourceLine[] lines = p.getLines(new StringBufferInputStream(s), Charset.defaultCharset());
    FillerRequest file = new FillerRequest(null, null, result.getTree(), lines);
    for (Token t : result.getStream().getTokens()) {
        if (t.getType() == Token.EOF) {
            continue;
        }
        int[] start = file.getLineAndColumn(t.getStartIndex());
        int[] end = file.getLineAndColumn(t.getStopIndex());
        Assert.assertNotNull(start);
        Assert.assertNotNull(end);
        Assert.assertEquals(t.getLine(), start[0]);
        System.out.println(t.getText() + Arrays.toString(start) + " " + t.getCharPositionInLine() + " "
                + t.getLine() + " " + Arrays.toString(end));
        Assert.assertEquals(t.getCharPositionInLine(), start[1]);
    }
}
Project: swift-js-transpiler    File: SwiftSupport.java
/**
 "If an operator has whitespace on the right side only, it is treated as a
 postfix unary operator. As an example, the ++ operator in a++ b is treated
 as a postfix unary operator."
 "If an operator has no whitespace on the left but is followed immediately
 by a dot (.), it is treated as a postfix unary operator. As an example,
 the ++ operator in a++.b is treated as a postfix unary operator (a++ .b
 rather than a ++ .b)."
 */
public static boolean isPostfixOp(TokenStream tokens) {
    int stop = getLastOpTokenIndex(tokens);
    if ( stop==-1 ) return false;

    int start = tokens.index();
    Token prevToken = tokens.get(start-1); // includes hidden-channel tokens
    Token nextToken = tokens.get(stop+1);
    boolean prevIsWS = isLeftOperatorWS(prevToken);
    boolean nextIsWS = isRightOperatorWS(nextToken);
    boolean result =
        !prevIsWS && nextIsWS ||
        !prevIsWS && nextToken.getType()==SwiftParser.DOT;
    String text = tokens.getText(Interval.of(start, stop));
    //System.out.println("isPostfixOp: '"+prevToken+"','"+text+"','"+nextToken+"' is "+result);
    return result;
}
Project: exterminator    File: CoqSyntaxException.java
private static String makeMessage(CoqFTParser parser, Token offendingToken,
        int line, int charPositionInLine, String msg) {
    StringBuilder sb = new StringBuilder();
    sb.append("ANTLR SYNTAX ERROR\n");

    sb.append("Offending line:\n");
    sb.append(underlineError(parser, offendingToken, line,
            charPositionInLine)).append("\n\n");

    sb.append("Rule stack:\n");
    List<String> stack = parser.getRuleInvocationStack();
    Collections.reverse(stack);
    sb.append(stack).append("\n\n");

    sb.append("Message:\n");
    sb.append("line ").append(line).append(":");
    sb.append(charPositionInLine).append(" ").append(msg);

    if(DEBUG_SHOW_FULL_TEXT) {
        sb.append("\n\nFull text:\n");
        CommonTokenStream tokens = (CommonTokenStream)parser.getInputStream();
        sb.append(tokens.getTokenSource().getInputStream().toString());
    }

    return sb.toString();
}
Project: codebuff    File: IdentifyOversizeLists.java
public void visitNonSingletonWithSeparator(ParserRuleContext ctx,
                                           List<? extends ParserRuleContext> siblings,
                                           Token separator)
{
    boolean oversize = isOversizeList(ctx, siblings, separator);
    Map<Token, Pair<Boolean, Integer>> tokenInfo =
        getInfoAboutListTokens(ctx, tokens, tokenToNodeMap, siblings, oversize);

    // copy sibling list info for associated tokens into overall list
    // but don't overwrite existing so that most general (largest construct)
    // list information is used/retained (i.e., not overwritten).
    for (Token t : tokenInfo.keySet()) {
        if ( !tokenToListInfo.containsKey(t) ) {
            tokenToListInfo.put(t, tokenInfo.get(t));
        }
    }
}
Project: ShapeChange    File: Sbvr2FolVisitor.java
@Override
public Expression visitNameExpr(NameExprContext ctx) {

    List<String> names = new ArrayList<String>();

    for (Token t : ctx.values) {
        String s = t.getText();
        // strip leading and trailing "'"
        names.add(s.substring(1, s.length() - 1));
    }

    if (names.size() == 1) {

        StringLiteral sl = new StringLiteral();
        sl.setValue(names.get(0));
        return sl;

    } else {

        StringLiteralList sll = new StringLiteralList();
        sll.setValues(names);

        return sll;
    }
}
Project: newton    File: DenterHelper.java
private void initIfFirstRun() {
  if (indentations.isEmpty()) {
    indentations.push(0);
    // First invocation. Look for the first non-NL. Enqueue it, and possibly an indentation if that non-NL
    // token doesn't start at char 0.
    Token firstRealToken;
    do {
      firstRealToken = pullToken();
    }
    while(firstRealToken.getType() == nlToken);

    if (firstRealToken.getCharPositionInLine() > 0) {
      indentations.push(firstRealToken.getCharPositionInLine());
      dentsBuffer.add(createToken(indentToken, firstRealToken));
    }
    dentsBuffer.add(firstRealToken);
  }
}
Project: codebuff    File: Trainer.java
public static int getMatchingSymbolStartsLine(Corpus corpus,
                                                  InputDocument doc,
                                                  TerminalNode node)
    {
        TerminalNode matchingLeftNode = getMatchingLeftSymbol(corpus, doc, node);
        if ( matchingLeftNode != null ) {
            Token matchingLeftToken = matchingLeftNode.getSymbol();
            int i = matchingLeftToken.getTokenIndex();
            if ( i==0 ) return 1; // first token is considered first on line
            Token tokenBeforeMatchingToken = doc.tokens.getPreviousRealToken(i);
//          System.out.printf("doc=%s node=%s, pair=%s, before=%s\n",
//                            new File(doc.fileName).getName(), node.getSymbol(), matchingLeftToken, tokenBeforeMatchingToken);
            if ( tokenBeforeMatchingToken!=null ) {
                return matchingLeftToken.getLine()>tokenBeforeMatchingToken.getLine() ? 1 : 0;
            }
            else { // matchingLeftToken must be first in file
                return 1;
            }
        }
        return NOT_PAIR;
    }
Project: codebuff    File: Formatter.java
public static void wipeCharPositionInfoAndWhitespaceTokens(CodeBuffTokenStream tokens) {
    tokens.fill();
    CommonToken dummy = new CommonToken(Token.INVALID_TYPE, "");
    dummy.setChannel(Token.HIDDEN_CHANNEL);
    Token firstRealToken = tokens.getNextRealToken(-1);
    for (int i = 0; i<tokens.size(); i++) {
        if ( i==firstRealToken.getTokenIndex() ) continue; // don't wack first token
        CommonToken t = (CommonToken)tokens.get(i);
        if ( t.getText().matches("\\s+") ) {
            tokens.getTokens().set(i, dummy); // wack whitespace token so we can't use it during prediction
        }
        else {
            t.setLine(0);
            t.setCharPositionInLine(-1);
        }
    }
}
Project: SwiftAnalyzer    File: ListenerUtil.java
/**
 * Returns location of the end multiline comment symbol.
 *
 * @param comment A token representing a comment
 * @return Location of the end symbol
 */
public static Location getEndOfMultilineComment(Token comment) {
    String commentText = comment.getText();
    if (commentText.charAt(commentText.length() - 1) == '\n') {
        commentText = commentText.substring(0, commentText.length() - 1);
    }
    int numNewlines = 0;
    int lastNewlineIndex = -1;
    for (int i = 0; i < commentText.length(); i++) {
        if (commentText.charAt(i) == '\n') {
            lastNewlineIndex = i;
            numNewlines += 1;
        }
    }
    String lastLine = commentText.substring(lastNewlineIndex + 1);
    return new Location(comment.getLine() + numNewlines,
        numNewlines == 0 ? comment.getCharPositionInLine() + lastLine.length() - 1 : lastLine.length() - 1);
}
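A hedged sketch of calling this helper directly; the token type (1) is an arbitrary placeholder and Location is SwiftAnalyzer's own value class:

CommonToken comment = new CommonToken(1, "/* first line\n   second line */");
comment.setLine(10);
comment.setCharPositionInLine(4);
Location end = ListenerUtil.getEndOfMultilineComment(comment); // one embedded newline, so the end is reported on line 11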
Project: inmemantlr    File: InmemantlrGrammar.java
/**
 * We want to touch as little ANTLR code as possible. We override this
 * function to simulate the existence of the token vocab parser.
 */
@Override
public void importTokensFromTokensFile() {
    if (!tokenVocab.isEmpty()) {
        MemoryTokenVocabParser vparser = new MemoryTokenVocabParser(this, tokenVocab);
        Map<String, Integer> tokens = vparser.load();

        int ret;
        for (String t : tokens.keySet()) {
            if (t.charAt(0) == '\'') {
                ret = defineStringLiteral(t, tokens.get(t));
                if (ret == Token.INVALID_TYPE)
                    throw new IllegalArgumentException("Token must not be INVAlID_TYPE");
            } else {
                ret = defineTokenName(t, tokens.get(t));
                if (ret == Token.INVALID_TYPE)
                    throw new IllegalArgumentException("Token must not be INVAlID_TYPE");
            }
            LOGGER.debug("token {} {}", t, tokens.get(t));
        }
    }
}
Project: kalang    File: AstBuilder.java
@Override
public Object visitBitShiftExpr(KalangParser.BitShiftExprContext ctx) {
    String op;
    Token opStart;
    if(ctx.left!=null){
        op = "<<";
        opStart = ctx.left;
    }else if(ctx.right!=null){
        op = ">>";
        opStart = ctx.right;
    }else if(ctx.uright!=null){
        op = ">>>";
        opStart = ctx.uright;
    }else{
        throw Exceptions.unexceptedValue(ctx);
    }
    return this.createBinaryExpr(op, ctx.expression(0), ctx.expression(1)
            , opStart,ctx.stop, ctx);
}
Project: antsdb    File: ExprGenerator.java
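// Decodes the contents of a quoted binary string literal token, translating the \0, \n,
// \r and \Z escape sequences (other escaped characters keep their literal value).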
private static byte[] getBytes(Literal_value_binaryContext rule) {
    Token token = rule.STRING_LITERAL().getSymbol();
    byte[] bytes = new byte[token.getStopIndex() - token.getStartIndex() - 1];
    CharStream cs = token.getInputStream();
    int pos = cs.index();
    cs.seek(token.getStartIndex() + 1);
    int j = 0;
    for (int i = 0; i < bytes.length; i++) {
        int ch = cs.LA(i + 1);
        if (ch == '\\') {
            i++;
            ch = cs.LA(i + 1);
            if (ch == '0') {
                ch = 0;
            }
            else if (ch == 'n') {
                ch = '\n';
            }
            else if (ch == 'r') {
                ch = '\r';
            }
            else if (ch == 'Z') {
                ch = '\032';
            }
        }
        bytes[j] = (byte) ch;
        j++;
    }
    cs.seek(pos);
    if (j != bytes.length) {
        // escape sequences were collapsed; trim the array to the actual length
        byte[] old = bytes;
        bytes = new byte[j];
        System.arraycopy(old, 0, bytes, 0, j);
    }
    return bytes;
}
Project: ksql    File: AstBuilder.java
private static ArithmeticBinaryExpression.Type getArithmeticBinaryOperator(Token operator) {
  switch (operator.getType()) {
    case SqlBaseLexer.PLUS:
      return ArithmeticBinaryExpression.Type.ADD;
    case SqlBaseLexer.MINUS:
      return ArithmeticBinaryExpression.Type.SUBTRACT;
    case SqlBaseLexer.ASTERISK:
      return ArithmeticBinaryExpression.Type.MULTIPLY;
    case SqlBaseLexer.SLASH:
      return ArithmeticBinaryExpression.Type.DIVIDE;
    case SqlBaseLexer.PERCENT:
      return ArithmeticBinaryExpression.Type.MODULUS;
    default:
      throw new UnsupportedOperationException("Unsupported operator: " + operator.getText());
  }
}
Project: rapidminer    File: CapitulatingErrorStrategy.java
@Override
protected void reportUnwantedToken(Parser recognizer) {
    // change error message from default implementation
    if (inErrorRecoveryMode(recognizer)) {
        return;
    }

    beginErrorCondition(recognizer);

    Token t = recognizer.getCurrentToken();
    String tokenName = getTokenErrorDisplay(t);
    String msg = "extraneous input " + tokenName + " expecting operator";
    recognizer.notifyErrorListeners(t, msg, null);
}
Project: elasticsearch_my    File: EnhancedPainlessLexer.java
private static boolean insertSemicolon(Token previous, Token next) {
    if (previous == null || next.getType() != PainlessLexer.RBRACK) {
        return false;
    }
    switch (previous.getType()) {
    case PainlessLexer.RBRACK:     // };} would be weird!
    case PainlessLexer.SEMICOLON:  // already have a semicolon, no need to add one
    case PainlessLexer.LBRACK:     // empty blocks don't need a semicolon
        return false;
    default:
        return true;
    }
}
Project: elasticsearch_my    File: ParserErrorStrategy.java
@Override
public Token recoverInline(final Parser recognizer) throws RecognitionException {
    final Token token = recognizer.getCurrentToken();
    final String message = "unexpected token [" + getTokenErrorDisplay(token) + "]" +
        " was expecting one of [" + recognizer.getExpectedTokens().toString(recognizer.getVocabulary()) + "].";

    Location location = new Location(sourceName, token.getStartIndex());
    throw location.createError(new IllegalArgumentException(message));
}