Java 类org.antlr.v4.runtime.atn.PredictionMode 实例源码

项目:elasticsearch_my    文件:Walker.java   
/**
 * Configures the given parser for strict ("picky") parsing in tests:
 * ambiguities are diagnosed exactly, and every reported problem fails the
 * test immediately via an AssertionError.
 */
private void setupPicky(PainlessParser parser) {
    // Report ambiguity issues through syntaxError on the other listeners.
    parser.addErrorListener(new DiagnosticErrorListener(true));

    // Turn any reported syntax problem into a hard test failure.
    BaseErrorListener failTheTest = new BaseErrorListener() {
        @Override
        public void syntaxError(final Recognizer<?,?> recognizer, final Object offendingSymbol, final int line,
                                final int charPositionInLine, final String msg, final RecognitionException e) {
            throw new AssertionError("line: " + line + ", offset: " + charPositionInLine +
                ", symbol:" + offendingSymbol + " " + msg);
        }
    };
    parser.addErrorListener(failTheTest);

    // Exact ambiguity detection is costly, but it is what DiagnosticErrorListener
    // expects by default, so we keep the two consistent.
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
}
项目:clarpse    文件:ClarpseGoCompiler.java   
/**
 * Lexes, parses and walks each raw Go source file, populating the given
 * source-code model via the tree listener.
 *
 * Bug fix: the bail-out error strategy and SLL prediction mode were
 * previously applied AFTER {@code parser.sourceFile()} had already run, so
 * they had no effect on the parse. They are now configured before parsing.
 *
 * @param files            raw source files to compile
 * @param srcModel         model populated by the tree listener
 * @param projectFileTypes file types/paths recognized in this project
 */
private void compileFiles(List<RawFile> files, OOPSourceCodeModel srcModel, List<String> projectFileTypes) {
    for (RawFile file : files) {
        try {
            CharStream charStream = new ANTLRInputStream(file.content());
            GolangLexer lexer = new GolangLexer(charStream);
            TokenStream tokens = new CommonTokenStream(lexer);
            GolangParser parser = new GolangParser(tokens);
            // Configure the parser BEFORE invoking the start rule so the
            // settings actually influence this parse.
            parser.setErrorHandler(new BailErrorStrategy());
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            SourceFileContext sourceFileContext = parser.sourceFile();
            ParseTreeWalker walker = new ParseTreeWalker();
            GolangBaseListener listener = new GoLangTreeListener(srcModel, projectFileTypes, file);
            walker.walk(listener, sourceFileContext);
        } catch (Exception e) {
            // Best-effort batch: one bad file should not abort the others.
            e.printStackTrace();
        }
    }
}
项目:mdetect    文件:ParseUtils.java   
/**
 * Builds a case-insensitive lexer/parser pair for the PHP file at the given
 * path.
 *
 * @param filePath path to the PHP source file
 * @return the parser/lexer pair, or null when the file cannot be read
 */
public static Pair<Parser, Lexer> parsePHP(String filePath) {
    final AntlrCaseInsensitiveFileStream input;
    try {
        input = new AntlrCaseInsensitiveFileStream(filePath);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    final PHPLexer lexer = new PHPLexer(input);
    final CommonTokenStream tokens = new CommonTokenStream(lexer);
    final PHPParser parser = new InterruptablePHPParser(tokens, filePath);
    /* SLL prediction keeps parsing fast for typical inputs */
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    return new Pair<Parser, Lexer>(parser, lexer);
}
项目:groovy    文件:DescriptiveErrorStrategy.java   
/**
 * Marks every open rule context with the offending exception, emits a
 * descriptive report when the parser runs in full LL prediction mode, and
 * then bails out by rethrowing as a ParseCancellationException.
 */
@Override
public void recover(Parser recognizer, RecognitionException e) {
    ParserRuleContext context = recognizer.getContext();
    while (context != null) {
        context.exception = e;
        context = context.getParent();
    }

    // Only the LL (second-phase) parse produces user-facing diagnostics.
    if (PredictionMode.LL.equals(recognizer.getInterpreter().getPredictionMode())) {
        if (e instanceof NoViableAltException) {
            reportNoViableAlternative(recognizer, (NoViableAltException) e);
        } else if (e instanceof InputMismatchException) {
            reportInputMismatch(recognizer, (InputMismatchException) e);
        } else if (e instanceof FailedPredicateException) {
            reportFailedPredicate(recognizer, (FailedPredicateException) e);
        }
    }

    throw new ParseCancellationException(e);
}
项目:compiler    文件:BoaCompiler.java   
// Parses Boa source from the token stream: tries the fast SLL prediction mode
// first, then falls back to full LL prediction (with the supplied error
// listener attached) when the SLL attempt is cancelled.
private static Start parse(final CommonTokenStream tokens, final BoaParser parser, final BoaErrorListener parserErrorListener) {
    parser.setBuildParseTree(false);
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);

    try {
        return parser.start().ast;
    } catch (final ParseCancellationException e) {
        // fall-back to LL mode parsing if SLL fails
        tokens.reset();
        parser.reset();

        // Errors are only reported during the second (LL) attempt.
        parser.removeErrorListeners();
        parser.addErrorListener(parserErrorListener);
        parser.getInterpreter().setPredictionMode(PredictionMode.LL);

        return parser.start().ast;
    }
}
项目:hypertalk-java    文件:TwoPhaseParser.java   
/**
 * "Second phase" parsing attempt. Accepts any valid HyperTalk script entry,
 * trading performance for the accuracy of full LL prediction.
 *
 * @param compilationUnit The unit of work to compile/parse. Represents the grammar's start symbol that should be
 *                        used.
 * @param scriptText A plaintext representation of the HyperTalk script to parse
 * @return The root of the abstract syntax tree associated with the given compilation unit (i.e., {@link Script}).
 * @throws HtSyntaxException Thrown if an error occurs while parsing the script.
 */
static Object parseLL(CompilationUnit compilationUnit, String scriptText) throws HtSyntaxException {
    HyperTalkLexer lexer = new HyperTalkLexer(new CaseInsensitiveInputStream(scriptText));
    HyperTalkParser parser = new HyperTalkParser(new CommonTokenStream(lexer));
    HyperTalkErrorListener errors = new HyperTalkErrorListener();

    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    parser.removeErrorListeners();        // keep the console quiet
    parser.addErrorListener(errors);

    try {
        ParseTree tree = compilationUnit.getParseTree(parser);

        // Surface the first recorded syntax error, if any were collected.
        if (!errors.errors.isEmpty()) {
            throw errors.errors.get(0);
        }

        return new HyperTalkTreeVisitor().visit(tree);
    } catch (RecognitionException e) {
        throw new HtSyntaxException(e);
    }
}
项目:hypertalk-java    文件:TwoPhaseParser.java   
/**
 * "First phase" parsing attempt: fast SLL prediction with a bail-out error
 * strategy. Provides better performance than
 * {@link #parseLL(CompilationUnit, String)}, but will erroneously report
 * syntax errors for scripts using certain parts of the grammar.
 *
 * @param compilationUnit The unit of work to compile/parse. Represents the grammar's start symbol that should be
 *                        used.
 * @param scriptText A plaintext representation of the HyperTalk script to parse
 * @return The root of the abstract syntax tree associated with the given compilation unit (i.e., {@link Script}),
 * or null if parsing fails.
 */
static Object parseSLL(CompilationUnit compilationUnit, String scriptText) {
    HyperTalkLexer lexer = new HyperTalkLexer(new CaseInsensitiveInputStream(scriptText));
    HyperTalkParser parser = new HyperTalkParser(new CommonTokenStream(lexer));

    parser.setErrorHandler(new BailErrorStrategy());
    parser.removeErrorListeners();
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);

    try {
        ParseTree tree = compilationUnit.getParseTree(parser);
        return new HyperTalkTreeVisitor().visit(tree);
    } catch (ParseCancellationException e) {
        // SLL bailed out; callers fall back to the second-phase parse.
        return null;
    }
}
项目:goworks    文件:ParserFactory.java   
// Builds a code-completion-oriented Go parser over the given token stream:
// no parse tree, full LL prediction, and no error listeners attached.
@NonNull
public CodeCompletionGoParser getParser(@NonNull TokenStream input) {
    CodeCompletionGoParser parser = createParser(input);

    parser.removeErrorListeners();
    parser.setBuildParseTree(false);
    parser.setErrorHandler(new DefaultErrorStrategy());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    // NOTE(review): these interpreter flags are project-specific extensions of
    // the ANTLR interpreter — presumably they bias prediction toward local
    // context for completion speed; confirm against the interpreter source.
    parser.getInterpreter().force_global_context = false;
    parser.getInterpreter().always_try_local_context = true;

    // Package-name checking is disabled for code completion.
    parser.setCheckPackageNames(false);
    parser.setPackageNames(Collections.<String>emptyList());

    return parser;
}
项目:ethereumj    文件:ParserUtils.java   
/**
 * Reflectively constructs a parser of the requested class over the given
 * source text, configured for strict testing: exact ambiguity detection,
 * diagnostic reporting, and a fail-fast listener (nothing goes to stderr).
 *
 * @throws IllegalArgumentException when the parser class lacks a
 *         single-argument TokenStream constructor.
 */
public static <P extends Parser> P getParser(Class<? extends Lexer> lexerClass, Class<P> parserClass, String source) {
  TokenStream tokens = new CommonTokenStream(getLexer(lexerClass, source));

  final P parser;
  try {
    parser = parserClass.getConstructor(TokenStream.class).newInstance(tokens);
  } catch (Exception e) {
    throw new IllegalArgumentException("couldn't invoke parser constructor", e);
  }

  // Strict mode: flag every exact ambiguity and fail the run on any error.
  parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
  parser.removeErrorListeners();
  parser.addErrorListener(new DiagnosticErrorListener());
  parser.addErrorListener(new AntlrFailureListener());

  return parser;
}
项目:antlr-denter    文件:ParserUtils.java   
/**
 * Reflectively constructs a parser of the requested class over the given
 * source text, configured for strict testing: exact ambiguity detection,
 * diagnostic reporting, and a fail-fast listener (nothing goes to stderr).
 *
 * @throws IllegalArgumentException when the parser class lacks a
 *         single-argument TokenStream constructor.
 */
public static <P extends Parser> P getParser(Class<? extends Lexer> lexerClass, Class<P> parserClass, String source) {
  TokenStream tokens = new CommonTokenStream(getLexer(lexerClass, source));

  final P parser;
  try {
    parser = parserClass.getConstructor(TokenStream.class).newInstance(tokens);
  } catch (Exception e) {
    throw new IllegalArgumentException("couldn't invoke parser constructor", e);
  }

  // Strict mode: flag every exact ambiguity and fail the run on any error.
  parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
  parser.removeErrorListeners();
  parser.addErrorListener(new DiagnosticErrorListener());
  parser.addErrorListener(new AntlrFailureListener());

  return parser;
}
项目:rainbow    文件:SqlParser.java   
// Parses the given SQL text into an AST node. The parse is first attempted in
// the faster SLL prediction mode and retried with full LL prediction when SLL
// is cancelled; a StackOverflowError (from deeply nested input) is reported
// as a ParsingException.
private Node invokeParser(String name, String sql, Function<SqlBaseParser, ParserRuleContext> parseFunction, ParsingOptions parsingOptions)
{
    try {
        SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(new ANTLRInputStream(sql)));
        CommonTokenStream tokenStream = new CommonTokenStream(lexer);
        SqlBaseParser parser = new SqlBaseParser(tokenStream);

        // The post-processing listener runs during the parse itself.
        parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames())));

        // Route all lexer and parser diagnostics through the shared listener.
        lexer.removeErrorListeners();
        lexer.addErrorListener(ERROR_LISTENER);

        parser.removeErrorListeners();
        parser.addErrorListener(ERROR_LISTENER);

        ParserRuleContext tree;
        try {
            // first, try parsing with potentially faster SLL mode
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            tree = parseFunction.apply(parser);
        }
        catch (ParseCancellationException ex) {
            // if we fail, parse with LL mode
            tokenStream.reset(); // rewind input stream
            parser.reset();

            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            tree = parseFunction.apply(parser);
        }

        return new AstBuilder(parsingOptions).visit(tree);
    }
    catch (StackOverflowError e) {
        throw new ParsingException(name + " is too large (stack overflow while parsing)");
    }
}
项目:rainbow    文件:TypeCalculation.java   
/**
 * Parses a type-calculation expression, attempting the faster SLL prediction
 * mode first and re-parsing with full LL prediction when SLL is cancelled.
 */
private static ParserRuleContext parseTypeCalculation(String calculation)
{
    TypeCalculationLexer lexer = new TypeCalculationLexer(new CaseInsensitiveStream(new ANTLRInputStream(calculation)));
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    TypeCalculationParser parser = new TypeCalculationParser(tokens);

    // Route all diagnostics through the shared listener.
    lexer.removeErrorListeners();
    lexer.addErrorListener(ERROR_LISTENER);
    parser.removeErrorListeners();
    parser.addErrorListener(ERROR_LISTENER);

    try {
        // Fast path: SLL prediction.
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        return parser.typeCalculation();
    }
    catch (ParseCancellationException ex) {
        // Slow path: rewind both stream and parser, then use full LL.
        tokens.reset();
        parser.reset();
        parser.getInterpreter().setPredictionMode(PredictionMode.LL);
        return parser.typeCalculation();
    }
}
项目:OperatieBRP    文件:ExpressieParser.java   
// Parses the given expression string into a parse tree and converts it to an
// Expressie via the generated visitor.
private static Expressie doParse(final String expressieString, final Context context) {
    // TEAMBRP-2535: the expression syntax cannot cope well with a string that is
    // followed by undefined fields. Wrapping the input in parentheses forces an
    // error to be reported in that case, since the content can then no longer be
    // matched exactly.
    final String expressieStringMetHaakjes = String.format("(%s)", expressieString);

    // Parsing is done with an ANTLR-generated visitor. To use it, the usual
    // pipeline has to be assembled (String->CharStream->Lexer->TokenStream).
    final CharStream cs = CharStreams.fromString(expressieStringMetHaakjes);
    final ParserErrorListener parserErrorListener = new ParserErrorListener();
    final BRPExpressietaalLexer lexer = new BRPExpressietaalLexer(cs);
    // Remove the lexer's built-in listener, which writes to System.out/err
    // (an expression such as ***bla*** would log unrecognized-token errors to
    // System.out); install our own error listener instead.
    lexer.removeErrorListeners();
    lexer.addErrorListener(parserErrorListener);
    final CommonTokenStream tokens = new CommonTokenStream(lexer);

    final BRPExpressietaalParser parser = new BRPExpressietaalParser(tokens);
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);

    // Remove the existing (default) error listeners and add our own.
    parser.removeErrorListeners();
    parser.addErrorListener(parserErrorListener);

    // Build the parse tree. This is where the actual parsing happens.
    final BRPExpressietaalParser.Brp_expressieContext tree = parser.brp_expressie();

    // Create a visitor for parsing.
    final ExpressieVisitor visitor = new ExpressieVisitor(context);
    // The visitor turns a parse tree into an Expressie — unless an error occurs.
    return visitor.visit(tree);
}
项目:exterminator    文件:CoqFTParser.java   
/**
 * Parses a Coq term. When {@code trySLL} is set, a fast SLL/bail-out attempt
 * is made first; on failure a fresh parser with default settings re-parses
 * the input from scratch.
 */
public static Term parseTerm(String s, boolean trySLL) {
    CoqFTParser parser = new CoqFTParser(s);
    if (trySLL) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        parser.setErrorHandler(new BailErrorStrategy());
        try {
            return parser.parseTerm();
        } catch (ParseCancellationException | CoqSyntaxException e) {
            // SLL attempt failed; rebuild the parser with default settings.
            parser = new CoqFTParser(s);
        }
    }
    return parser.parseTerm();
}
项目:exterminator    文件:CoqFTParser.java   
/**
 * Parses a Coq tactic. When {@code trySLL} is set, a fast SLL/bail-out
 * attempt is made first; on failure a fresh parser with default settings
 * re-parses the input from scratch.
 */
public static Tactic parseTactic(String s, boolean trySLL) {
    CoqFTParser parser = new CoqFTParser(s);
    if (trySLL) {
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        parser.setErrorHandler(new BailErrorStrategy());
        try {
            return parser.parseTactic();
        } catch (ParseCancellationException | CoqSyntaxException e) {
            // SLL attempt failed; rebuild the parser with default settings.
            parser = new CoqFTParser(s);
        }
    }
    return parser.parseTactic();
}
项目:ftc    文件:Util.java   
/**
 * Replaces the parser's error listeners with a single verbose listener and
 * enables exact ambiguity detection.
 *
 * @return the listener that was installed.
 */
public static VerboseErrorListener addVerboseErrorListener(FusionTablesSqlParser parser)
{
    // Exact ambiguity detection makes diagnostics precise (at some cost).
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    parser.removeErrorListeners();

    final VerboseErrorListener verbose = new VerboseErrorListener();
    parser.addErrorListener(verbose);
    return verbose;
}
项目:yql-plus    文件:ProgramParser.java   
// Parses a program, retrying once with full LL prediction when the first
// attempt (presumably configured for SLL elsewhere — confirm) raises a
// RecognitionException.
private ProgramContext parseProgram(yqlplusParser parser) throws RecognitionException {
    try {
        return parser.program();
    } catch (RecognitionException e) {
        //Retry parsing using full LL mode
        parser.reset();
        parser.getInterpreter().setPredictionMode(PredictionMode.LL);
        return parser.program();
    }
}
项目:Alpha    文件:AnswerSetsParser.java   
// Parses answer sets from the stream using the fast SLL prediction mode with
// a bail-out error strategy and no listeners. Note there is no LL fallback
// here: a ParseCancellationException propagates to the caller on failure.
public static Set<AnswerSet> parse(CharStream stream) throws IOException {
    final ASPCore2Parser parser = new ASPCore2Parser(new CommonTokenStream(new ASPCore2Lexer(stream)));

    // Try SLL parsing mode (faster but may terminate incorrectly).
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    parser.removeErrorListeners();
    parser.setErrorHandler(new BailErrorStrategy());

    return VISITOR.translate(parser.answer_sets());
}
项目:vespa    文件:ProgramParser.java   
// Parses a program, retrying once with full LL prediction when the first
// attempt (presumably configured for SLL elsewhere — confirm) raises a
// RecognitionException.
private ProgramContext parseProgram(yqlplusParser parser) throws  RecognitionException {
    try {
        return parser.program();
    } catch (RecognitionException e) {
        //Retry parsing using full LL mode
        parser.reset();
        parser.getInterpreter().setPredictionMode(PredictionMode.LL);
        return parser.program();
    }
}
项目:ksql    文件:KsqlParser.java   
/**
 * Builds a parse tree for the given KSQL statement text, first trying the
 * faster SLL prediction mode and re-parsing in full LL mode if that fails.
 */
private ParserRuleContext getParseTree(String sql) {
    SqlBaseLexer sqlBaseLexer =
        new SqlBaseLexer(new CaseInsensitiveStream(new ANTLRInputStream(sql)));
    CommonTokenStream tokenStream = new CommonTokenStream(sqlBaseLexer);
    SqlBaseParser sqlBaseParser = new SqlBaseParser(tokenStream);

    // Route lexer and parser diagnostics through the shared listener.
    sqlBaseLexer.removeErrorListeners();
    sqlBaseLexer.addErrorListener(ERROR_LISTENER);
    sqlBaseParser.removeErrorListeners();
    sqlBaseParser.addErrorListener(ERROR_LISTENER);

    try {
        // Fast path: SLL prediction.
        sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        return sqlBaseParser.statements();
    } catch (ParseCancellationException ex) {
        // Slow path: rewind the token stream and re-parse with full LL.
        tokenStream.reset();
        sqlBaseParser.reset();
        sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.LL);
        return sqlBaseParser.statements();
    }
}
项目:groovy    文件:AstBuilder.java   
/**
 * Builds the concrete syntax tree under the requested prediction mode. The
 * SLL attempt runs silently; any other mode rewinds the input and re-enables
 * the error listeners before parsing.
 */
private GroovyParserRuleContext buildCST(PredictionMode predictionMode) {
    parser.getInterpreter().setPredictionMode(predictionMode);

    if (!PredictionMode.SLL.equals(predictionMode)) {
        // Second attempt: rewind and report errors this time.
        parser.getInputStream().seek(0);
        this.addErrorListeners();
    } else {
        // First, speculative attempt: stay quiet.
        this.removeErrorListeners();
    }

    return parser.compilationUnit();
}
项目:presto    文件:SqlParser.java   
// Parses the given SQL text into an AST node. The parse is first attempted in
// the faster SLL prediction mode and retried with full LL prediction when SLL
// is cancelled; a StackOverflowError (from deeply nested input) is reported
// as a ParsingException.
private Node invokeParser(String name, String sql, Function<SqlBaseParser, ParserRuleContext> parseFunction)
{
    try {
        SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(new ANTLRInputStream(sql)));
        CommonTokenStream tokenStream = new CommonTokenStream(lexer);
        SqlBaseParser parser = new SqlBaseParser(tokenStream);

        // The post-processing listener runs during the parse itself.
        parser.addParseListener(new PostProcessor());

        // Route all lexer and parser diagnostics through the shared listener.
        lexer.removeErrorListeners();
        lexer.addErrorListener(ERROR_LISTENER);

        parser.removeErrorListeners();
        parser.addErrorListener(ERROR_LISTENER);

        ParserRuleContext tree;
        try {
            // first, try parsing with potentially faster SLL mode
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
            tree = parseFunction.apply(parser);
        }
        catch (ParseCancellationException ex) {
            // if we fail, parse with LL mode
            tokenStream.reset(); // rewind input stream
            parser.reset();

            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            tree = parseFunction.apply(parser);
        }

        return new AstBuilder().visit(tree);
    }
    catch (StackOverflowError e) {
        throw new ParsingException(name + " is too large (stack overflow while parsing)");
    }
}
项目:Trinity    文件:ParserTest.java   
/**
 * Creates a TrinityParser over the given input, configured for the parser
 * tests: diagnostic reporting with exact ambiguity detection.
 */
private static TrinityParser createParser(ANTLRInputStream input) throws IOException {
    TrinityLexer lexer = new TrinityLexer(input);
    TrinityParser parser = new TrinityParser(new CommonTokenStream(lexer));
    parser.removeErrorListeners();
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    return parser;
}
项目:reviki    文件:CreoleRenderer.java   
/**
 * Attempt a parse under the given prediction mode, leaving the token stream
 * and parser rewound and ready for a retry when anything goes wrong.
 *
 * @param tokens The token stream. Consumed by the parser, and reset on
 *          failure.
 * @param parser The parser. Reset on failure.
 * @param pmode The prediction mode.
 * @return The parse tree, or absent when parsing failed.
 */
private static Optional<ParseTree> tryParse(final CommonTokenStream tokens, final Creole parser, final PredictionMode pmode) {
  parser.getInterpreter().setPredictionMode(pmode);

  try {
    ParseTree tree = (ParseTree) parser.creole();
    return Optional.of(tree);
  }
  catch (Exception e) {
    // Rewind everything so the caller can retry in another mode.
    tokens.reset();
    parser.reset();
    return Optional.<ParseTree> absent();
  }
}
项目:reviki    文件:CreoleRenderer.java   
/**
 * Render a stream of text.
 *
 * @param in The input stream to render.
 * @param visitor The visitor to do the rendering.
 * @param macros List of macros to apply.
 * @param lexer The (reused) lexer; its input is replaced with {@code in}.
 * @param reset Whether to reset the expansion limit or not.
 * @return The AST of the page, after macro expansion.
 */
private static ASTNode renderInternal(final ANTLRInputStream in, final CreoleASTBuilder visitor, final Supplier<List<Macro>> macros, final CreoleTokens lexer, final boolean reset) {
  lexer.setInputStream(in);
  CommonTokenStream tokens = new CommonTokenStream(lexer);
  Creole parser = new Creole(tokens);

  // First try parsing in SLL mode. This is really fast for pages with no
  // parse errors.
  Optional<ParseTree> tree = tryParse(tokens, parser, PredictionMode.SLL);

  if (!tree.isPresent()) {
    tree = tryParse(tokens, parser, PredictionMode.LL);
  }

  // NOTE(review): if the LL attempt also fails, tree.get() throws an
  // IllegalStateException here — confirm a both-fail case cannot occur.
  ASTNode rendered = visitor.visit(tree.get());

  // Expand macros
  if (reset) {
    _expansionLimit = MACRO_DEPTH_LIMIT;
  }

  ASTNode expanded = rendered;

  // Depth counter guards against unbounded recursive macro expansion.
  // NOTE(review): _expansionLimit is static mutable state — presumably
  // rendering is single-threaded; confirm.
  if (_expansionLimit > 0) {
    _expansionLimit--;
    expanded = rendered.expandMacros(macros);
    _expansionLimit++;
  }

  return expanded;
}
项目:goworks    文件:GrammarParserFactory.java   
// Creates a grammar parser over the given token stream: full LL prediction,
// descriptive error reporting only, no parse tree, default error recovery.
@NonNull
public GrammarParser getParser(@NonNull TokenStream input) {
    GrammarParser result = createParser(input);
    result.getInterpreter().setPredictionMode(PredictionMode.LL);
    result.removeErrorListeners();
    result.addErrorListener(DescriptiveErrorListener.INSTANCE);
    result.setBuildParseTree(false);
    result.setErrorHandler(new DefaultErrorStrategy());
    return result;
}
项目:goworks    文件:TemplateParserFactory.java   
// Creates a template parser over the given token stream: full LL prediction,
// descriptive error reporting only, no parse tree, default error recovery.
@NonNull
public TemplateParser getParser(@NonNull TokenStream input) {
    TemplateParser result = createParser(input);
    result.getInterpreter().setPredictionMode(PredictionMode.LL);
    result.removeErrorListeners();
    result.addErrorListener(DescriptiveErrorListener.INSTANCE);
    result.setBuildParseTree(false);
    result.setErrorHandler(new DefaultErrorStrategy());
    return result;
}
项目:antlrworks2    文件:GrammarParserFactory.java   
// Creates a grammar parser over the given token stream: full LL prediction,
// descriptive error reporting only, no parse tree, default error recovery.
@NonNull
public GrammarParser getParser(@NonNull TokenStream input) {
    GrammarParser result = createParser(input);
    result.getInterpreter().setPredictionMode(PredictionMode.LL);
    result.removeErrorListeners();
    result.addErrorListener(DescriptiveErrorListener.INSTANCE);
    result.setBuildParseTree(false);
    result.setErrorHandler(new DefaultErrorStrategy());
    return result;
}
项目:antlrworks2    文件:TemplateParserFactory.java   
// Creates a template parser over the given token stream: full LL prediction,
// descriptive error reporting only, no parse tree, default error recovery.
@NonNull
public TemplateParser getParser(@NonNull TokenStream input) {
    TemplateParser result = createParser(input);
    result.getInterpreter().setPredictionMode(PredictionMode.LL);
    result.removeErrorListeners();
    result.addErrorListener(DescriptiveErrorListener.INSTANCE);
    result.setBuildParseTree(false);
    result.setErrorHandler(new DefaultErrorStrategy());
    return result;
}
项目:dragoman    文件:BaseParser.java   
// Switches the parser to full LL ("strict") prediction and returns it; used
// to retry after the faster, more lenient SLL mode fails.
private SQLParser withStrictMode(SQLParser parser) {
  // if the faster (more lenient) mode fails then retry with the slower (stricter) mode
  parser.getInterpreter().setPredictionMode(PredictionMode.LL);

  return parser;
}
项目:ftc    文件:Util.java   
// Puts the parser into debug mode: exact ambiguity detection plus diagnostic
// and verbose error listeners (replacing any previously attached listeners).
public static void addDebugListeners(FusionTablesSqlParser parser) {
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    parser.removeErrorListeners();
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.addErrorListener(new VerboseErrorListener());
}
项目:codebuff    文件:TestRig.java   
// Drives one lex/parse run for the test rig: tokenizes the reader's content,
// optionally prints the tokens, then (unless only the lexer was requested)
// parses from the configured start rule and renders the tree according to
// the command-line flags. Both input handles are closed in all cases.
protected void process(Lexer lexer, Class<? extends Parser> parserClass, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, InvocationTargetException, PrintException {
    try {
        ANTLRInputStream input = new ANTLRInputStream(r);
        lexer.setInputStream(input);
        CommonTokenStream tokens = new CommonTokenStream(lexer);

        // Eagerly pull all tokens so they can be listed before parsing.
        tokens.fill();

        if ( showTokens ) {
            for (Object tok : tokens.getTokens()) {
                System.out.println(tok);
            }
        }

        // Lexer-only mode: nothing further to do.
        if ( startRuleName.equals(LEXER_START_RULE_NAME) ) return;

        if ( diagnostics ) {
            parser.addErrorListener(new DiagnosticErrorListener());
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
        }

        // A tree is only built when some form of output needs it.
        if ( printTree || gui || psFile!=null ) {
            parser.setBuildParseTree(true);
        }

        if ( SLL ) { // overrides diagnostics
            parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        }

        parser.setTokenStream(tokens);
        parser.setTrace(trace);

        try {
            // The start rule is looked up reflectively by name; only
            // zero-argument rule methods are supported.
            Method startRule = parserClass.getMethod(startRuleName);
            ParserRuleContext tree = (ParserRuleContext)startRule.invoke(parser, (Object[])null);

            if ( printTree ) {
                System.out.println(tree.toStringTree(parser));
            }
            if ( gui ) {
                Trees.inspect(tree, parser);
            }
            if ( psFile!=null ) {
                Trees.save(tree, parser, psFile); // Generate postscript
            }
        }
        catch (NoSuchMethodException nsme) {
            System.err.println("No method for rule "+startRuleName+" or it has arguments");
        }
    }
    finally {
        if ( r!=null ) r.close();
        if ( is!=null ) is.close();
    }
}
项目:Alpha    文件:ProgramParser.java   
// Parses an ASP-Core-2 program from the stream. A fast SLL/bail-out attempt
// is made first; when it fails with a genuine RecognitionException the token
// stream is rewound and the input re-parsed in full LL mode with standard
// error recovery, so that all syntax errors can be collected and reported.
public Program parse(CharStream stream) throws IOException {
    /*
    // In order to require less memory: use unbuffered streams and avoid constructing a full parse tree.
    ASPCore2Lexer lexer = new ASPCore2Lexer(new UnbufferedCharStream(is));
    lexer.setTokenFactory(new CommonTokenFactory(true));
    final ASPCore2Parser parser = new ASPCore2Parser(new UnbufferedTokenStream<>(lexer));
    parser.setBuildParseTree(false);
    */
    CommonTokenStream tokens = new CommonTokenStream(
        new ASPCore2Lexer(stream)
    );
    final ASPCore2Parser parser = new ASPCore2Parser(tokens);

    // Try SLL parsing mode (faster but may terminate incorrectly).
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    parser.removeErrorListeners();
    parser.setErrorHandler(new BailErrorStrategy());

    final CustomErrorListener errorListener = new CustomErrorListener(stream.getSourceName());

    ASPCore2Parser.ProgramContext programContext;
    try {
        // Parse program
        programContext = parser.program();
    } catch (ParseCancellationException e) {
        // Recognition exception may be caused simply by SLL parsing failing,
        // retry with LL parser and DefaultErrorStrategy printing errors to console.
        if (e.getCause() instanceof RecognitionException) {
            tokens.seek(0);
            // NOTE(review): only the token stream is rewound here; parser.reset()
            // is not called before re-parsing — confirm that no state left by the
            // failed SLL attempt leaks into the second parse.
            parser.addErrorListener(errorListener);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
            // Re-run parse.
            programContext = parser.program();
        } else {
            throw e;
        }
    }

    // If the our SwallowingErrorListener has handled some exception during parsing
    // just re-throw that exception.
    // At this time, error messages will be already printed out to standard error
    // because ANTLR by default adds an org.antlr.v4.runtime.ConsoleErrorListener
    // to every parser.
    // That ConsoleErrorListener will print useful messages, but not report back to
    // our code.
    // org.antlr.v4.runtime.BailErrorStrategy cannot be used here, because it would
    // abruptly stop parsing as soon as the first error is reached (i.e. no recovery
    // is attempted) and the user will only see the first error encountered.
    if (errorListener.getRecognitionException() != null) {
        throw errorListener.getRecognitionException();
    }

    // Abort parsing if there were some (recoverable) syntax errors.
    if (parser.getNumberOfSyntaxErrors() != 0) {
        throw new ParseCancellationException();
    }

    // Construct internal program representation.
    ParseTreeVisitor visitor = new ParseTreeVisitor(externals);
    return visitor.translate(programContext);
}
项目:rpgleparser    文件:TestFiles.java   
// Golden-file test: lexes and parses one .rpgle fixture, comparing the token
// list and parse tree against a sibling ".expected.txt" file. When that file
// is missing/empty (or autoReplaceFailed is set and the output differs), the
// expected file is (re)written instead of failing.
@Test
public void test() throws IOException, URISyntaxException{
    final String inputString = TestUtils.loadFile(sourceFile);
    final File expectedFile = new File(sourceFile.getPath().replaceAll("\\.rpgle", ".expected.txt"));
    final String expectedFileText = expectedFile.exists()?TestUtils.loadFile(expectedFile):null;
    final String expectedTokens = getTokens(expectedFileText);
    String expectedTree = getTree(expectedFileText);
    final List<String> errors = new ArrayList<String>();
       final ANTLRInputStream input = new ANTLRInputStream(new FixedWidthBufferedReader(inputString));
    final RpgLexer rpglexer = new RpgLexer(input);
       final TokenSource lexer = new PreprocessTokenSource(rpglexer);
       final CommonTokenStream tokens = new CommonTokenStream(lexer);

       final RpgParser parser = new RpgParser(tokens);

       // Collect lexer and parser diagnostics into the shared errors list.
       final ErrorListener errorListener = new ErrorListener(errors, rpglexer, parser);
       rpglexer.addErrorListener(errorListener);
       parser.addErrorListener(errorListener);

    // First comparison: the token list (also consumes the lexer, reset below).
    final String actualTokens = TestUtils.printTokens(lexer,rpglexer.getVocabulary());
       boolean rewriteExpectFile=false;
    if(expectedTokens != null && expectedTokens.trim().length()>0 ){
        if(autoReplaceFailed && !expectedTokens.equals(actualTokens)){
            rewriteExpectFile=true;
        }else{
            assertEquals("Token lists do not match",expectedTokens,actualTokens);
        }
    }
    rpglexer.reset();

    // Second comparison: the parse tree, using fast SLL prediction.
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    parser.reset();
    final ParseTree parseTree = parser.r();

    final String actualTree = TreeUtils.printTree(parseTree, parser);
    // Dump diagnostics before asserting so failures are easy to inspect.
    if(!errors.isEmpty()){
        System.out.println("/*===TOKENS===*/\r\n" + actualTokens + "\r\n");
        System.out.println("/*===TREE===*/\r\n" + actualTree + "\r\n/*======*/");
    }
    assertThat(errors, is(empty()));

    if(expectedTree==null || expectedTree.trim().length() == 0||rewriteExpectFile){
        writeExpectFile(expectedFile,actualTokens,actualTree);
        System.out.println("Tree written to " + expectedFile);
    }else{
        if(autoReplaceFailed && !actualTree.equals(expectedTree)){
            System.out.println("Replaced content of " + expectedFile);
            expectedTree = actualTree;
            writeExpectFile(expectedFile,actualTokens,actualTree);
        }
        assertEquals("Parse trees do not match",expectedTree,actualTree);
       }
}
项目:jamweaver    文件:WebkitGenerator.java   
// Lexes and parses a WebKit IDL specification from the reader, honoring the
// command-line options for token listing, diagnostics, prediction mode, tree
// printing/inspection and postscript output. Returns the specification tree.
protected SpecificationContext parse(Lexer lexer, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, PrintException {
  ANTLRInputStream input = new ANTLRInputStream(r);
  lexer.setInputStream(input);
  CommonTokenStream tokens = new CommonTokenStream(lexer);

  // Eagerly pull all tokens so they can be listed before parsing.
  tokens.fill();

  if (options.showTokens) {
    for (Object tok : tokens.getTokens()) {
      System.out.println(tok);
    }
  }

  if (options.diagnostics) {
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
  }

  parser.setBuildParseTree(true);

  // SLL overrides diagnostics
  // %%% Not sure what it really is though.
  if (options.sll) {
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
  }

  parser.setTokenStream(tokens);
  parser.setTrace(options.trace);

  // The caller is expected to pass a WebkitIDLParser instance here.
  SpecificationContext tree = ((WebkitIDLParser)parser).specification();

  if (options.printTree) {
    System.out.println(tree.toStringTree(parser));
  }
  if (options.gui) {
    tree.inspect(parser);
  }
  if (options.psFile != null) {
    tree.save(parser, options.psFile); // Generate postscript
  }

  return tree;
}
项目:jamweaver    文件:GeckoGenerator.java   
/**
 * Parses Gecko IDL from the reader {@code r} and returns the resulting
 * {@code specification} tree, applying the configured command-line options
 * (token dump, diagnostics, SLL prediction, tracing, tree output).
 *
 * @param lexer  lexer to attach to the new character stream
 * @param parser parser to configure; must be a GeckoIDLParser
 * @param is     unused; present to match the shared generator signature
 * @param r      character source containing the IDL text
 * @return the parsed {@code specification} context
 */
protected SpecificationContext parse(Lexer lexer, Parser parser, InputStream is, Reader r) throws IOException, IllegalAccessException, PrintException {
  ANTLRInputStream charStream = new ANTLRInputStream(r);
  lexer.setInputStream(charStream);
  CommonTokenStream stream = new CommonTokenStream(lexer);
  stream.fill();

  // Token dump, if requested.
  if (options.showTokens) {
    for (Object t : stream.getTokens()) {
      System.out.println(t);
    }
  }

  // Exact-ambiguity diagnostics (expensive full-LL mode).
  if (options.diagnostics) {
    parser.addErrorListener(new DiagnosticErrorListener());
    parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
  }

  parser.setBuildParseTree(true);

  // SLL overrides diagnostics: deliberately the last prediction-mode write.
  if (options.sll) {
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
  }

  parser.setTokenStream(stream);
  parser.setTrace(options.trace);

  SpecificationContext parseTree = ((GeckoIDLParser)parser).specification();

  if (options.printTree) {
    System.out.println(parseTree.toStringTree(parser));
  }
  if (options.gui) {
    parseTree.inspect(parser);
  }
  if (options.psFile != null) {
    parseTree.save(parser, options.psFile); // Generate postscript
  }

  return parseTree;
}
项目:antlr4-regressionTestRig    文件:RegressionTestRig.java   
/**
 * Loads the parser class requested by the command line arguments, creates an
 * instance of it, and configures that instance for the 'diagnostics',
 * 'printTree', 'trace' and 'SLL' options.
 * <p>
 * The {@link #parser} and {@link #treePrinter} variables may be null if no
 * parser was requested (that is, if the start rule name is the
 * LEXER_START_RULE_NAME), or if the requested parser could not be loaded or
 * instantiated.
 * <p>
 * The {@link #treePrinter} variable may also be null if the printTree option
 * has not been requested.
 *
 * @throws ClassNotFoundException if the {@code <grammarName>Parser} class
 *         cannot be found on the context class loader's class path
 * @throws NoSuchMethodException if the parser class has no
 *         {@code (TokenStream)} constructor
 * @throws InstantiationException if the parser class cannot be instantiated
 * @throws IllegalAccessException if the parser constructor is inaccessible
 * @throws InvocationTargetException if the parser constructor itself throws
 */
protected void loadParser() throws ClassNotFoundException, NoSuchMethodException,
    InstantiationException, IllegalAccessException, InvocationTargetException {

  ClassLoader cl = Thread.currentThread().getContextClassLoader();
  if ( !startRuleName.equals(LEXER_START_RULE_NAME) ) {
    String parserName = grammarName+"Parser";
    parserClass = null;
    try {
      parserClass = cl.loadClass(parserName).asSubclass(Parser.class);
    } catch (ClassNotFoundException cnfe) {
      System.err.println("ERROR: Can't load "+parserName+" as a parser");
      throw cnfe;
    }

    try {
      Constructor<? extends Parser> parserCtor = parserClass.getConstructor(TokenStream.class);
      parser = parserCtor.newInstance((TokenStream)null);
    } catch (Exception anException) {
      // Java 7+ "precise rethrow": although Exception is caught, only the
      // checked exceptions actually thrown by getConstructor/newInstance
      // (all declared in this method's throws clause) can escape here.
      System.err.println("ERROR: Could not create a parser for "+parserName);
      throw anException;
    }

    if ( diagnostics ) {
      parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
    }

    if ( printTree ) {
      parser.setBuildParseTree(true);
      treePrinter = new TreePrinter(primaryIndentStr,
                                    secondaryIndentStr,
                                    indentCyclePeriod,
                                    parser);
    }

    if ( trace ) {
      traceListener = new PrintStreamTraceListener(parser);
      parser.addParseListener(traceListener);
    }

    // Must run last: SLL deliberately overrides any prediction mode set by
    // the diagnostics option above.
    if ( SLL ) {
      parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    }
  }
}
项目:goworks    文件:ParserDebuggerReferenceAnchorsParserTask.java   
/**
 * Produces the file parse result and reference parse tree for the parser
 * debugger. Cached parser data is reused when available; otherwise the
 * snapshot is re-parsed with a tracing {@link ParserInterpreter} built from
 * the grammar's serialized ATN, with exact ambiguity detection and decision
 * statistics enabled.
 *
 * @throws InterruptedException if interrupted while waiting on dependent
 *         parser data futures
 * @throws ExecutionException if a dependent parser task fails
 */
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    //ParserDebuggerEditorKit.LEX
    // NOTE(review): 'lock' is presumably a field of this task that serializes
    // parse requests for the snapshot -- confirm intended scope.
    synchronized (lock) {
        // Fetch any already-computed results (NO_UPDATE presumably returns
        // cached data without scheduling a fresh parse -- confirm).
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            // Rebuild a token stream from the lexer tagger for this snapshot.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;

            // Reconstruct an interpreter-based parser from the serialized ATN
            // stashed on the document by the parser debugger editor kit.
            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);

            long startTime = System.nanoTime();
            // Statistics-collecting simulator; the LL(1) shortcut is disabled
            // (presumably so every decision is observed) and exact ambiguity
            // reporting is switched on.
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);

            // NOTE(review): 'document' is not declared in this method -- it
            // appears to be a field; the code above reads the document via
            // snapshot.getVersionedDocument().getDocument(). Confirm both
            // refer to the same document.
            String sourceName = (String)document.getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }

        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
项目:goworks    文件:CurrentRuleContextParserTask.java   
/**
 * Computes the {@code ruleSpec} parse tree for the grammar rule enclosing the
 * caret, publishing it as CURRENT_RULE_CONTEXT data. Uses dynamic anchor
 * points to locate the enclosing rule's span, then parses only that span,
 * first in fast SLL mode with a bail-out strategy and, on a recognition
 * error, again in full LL mode with default error recovery.
 *
 * @throws InterruptedException if interrupted while waiting on dependent
 *         parser data futures
 * @throws ExecutionException if a dependent parser task fails
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_ruleSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    if (requestedData.contains(GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT)) {
        CurrentRuleContextData data = null;
        if (context.getPosition() != null) {
            int caretOffset = context.getPosition().getOffset();

            // Block until the anchor points for this snapshot are available.
            Future<ParserData<List<Anchor>>> result =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.DYNAMIC_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.SYNCHRONOUS));

            ParserData<List<Anchor>> anchorsData = result.get();
            List<Anchor> anchors = anchorsData.getData();

            GrammarParser.RuleSpecContext ruleContext = null;
            int grammarType = -1;

            // A stale file model is acceptable here (ALLOW_STALE).
            Future<ParserData<FileModel>> fileModelResult =
                taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.ALLOW_STALE, ParserDataOptions.SYNCHRONOUS));
            ParserData<FileModel> fileModelData = fileModelResult.get();
            FileModel fileModel = fileModelData.getData();

            if (anchors != null) {
                Anchor enclosing = null;

                /*
                 * Find the last anchor whose span contains the caret; anchors
                 * are ordered by start offset, so stop once we pass the caret.
                 * The grammar type is picked up from its dedicated anchor
                 * along the way.
                 */
                for (Anchor anchor : anchors) {
                    if (anchor instanceof GrammarParserAnchorListener.GrammarTypeAnchor) {
                        grammarType = ((GrammarParserAnchorListener.GrammarTypeAnchor)anchor).getGrammarType();
                        continue;
                    }

                    if (anchor.getSpan().getStartPosition(snapshot).getOffset() <= caretOffset && anchor.getSpan().getEndPosition(snapshot).getOffset() > caretOffset) {
                        enclosing = anchor;
                    } else if (anchor.getSpan().getStartPosition(snapshot).getOffset() > caretOffset) {
                        break;
                    }
                }

                if (enclosing != null) {
                    // Parse only the enclosing rule's text, starting at its span.
                    CharStream input = new DocumentSnapshotCharStream(snapshot);
                    input.seek(enclosing.getSpan().getStartPosition(snapshot).getOffset());
                    GrammarLexer lexer = new GrammarLexer(input);
                    CommonTokenStream tokens = new TaskTokenStream(lexer);
                    GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokens);
                    try {
                        // Fast path: SLL prediction plus bail-on-first-error.
                        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                        parser.removeErrorListeners();
                        parser.setBuildParseTree(true);
                        parser.setErrorHandler(new BailErrorStrategy());
                        ruleContext = parser.ruleSpec();
                    } catch (ParseCancellationException ex) {
                        if (ex.getCause() instanceof RecognitionException) {
                            // retry with default error handler
                            // NOTE(review): no error listeners are re-added for
                            // this LL retry (they were removed above), so any
                            // recognition errors go unreported here, unlike
                            // ReferenceAnchorsParserTask which re-adds
                            // DescriptiveErrorListener -- confirm intentional.
                            tokens.reset();
                            parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                            parser.setInputStream(tokens);
                            parser.setErrorHandler(new DefaultErrorStrategy());
                            ruleContext = parser.ruleSpec();
                        } else {
                            throw ex;
                        }
                    }
                }
            }

            data = new CurrentRuleContextData(snapshot, grammarType, fileModel, ruleContext);
        }

        results.addResult(new BaseParserData<>(context, GrammarParserDataDefinitions.CURRENT_RULE_CONTEXT, snapshot, data));
    }
}
项目:goworks    文件:ReferenceAnchorsParserTask.java   
/**
 * Parses the whole grammar for the snapshot and publishes three results: the
 * reference parse tree, the anchor points derived from it, and the file model
 * built from it. Parsing is two-stage: fast SLL with a bail-out strategy,
 * falling back to full LL with default error recovery when the SLL attempt
 * hits a recognition error. In legacy mode only an empty anchor-points
 * result is produced.
 *
 * @throws InterruptedException if interrupted while waiting on dependent
 *         parser data futures
 * @throws ExecutionException if a dependent parser task fails
 */
@Override
@RuleDependency(recognizer=GrammarParser.class, rule=GrammarParser.RULE_grammarSpec, version=0, dependents=Dependents.SELF)
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    // Legacy-mode documents are not parsed; publish a null anchors result.
    boolean legacyMode = GrammarEditorKit.isLegacyMode(snapshot);
    if (legacyMode) {
        ParserData<List<Anchor>> emptyResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, null);
        results.addResult(emptyResult);
        return;
    }

    // NOTE(review): 'lock' is presumably a field serializing parses of this
    // snapshot -- confirm intended scope.
    synchronized (lock) {
        // Reuse cached results where present (NO_UPDATE presumably returns
        // existing data without scheduling a new parse -- confirm).
        ParserData<GrammarSpecContext> parseTreeResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<List<Anchor>> anchorPointsResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<FileModel> fileModelResult = taskManager.getData(snapshot, GrammarParserDataDefinitions.FILE_MODEL, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (parseTreeResult == null || anchorPointsResult == null || fileModelResult == null) {
            // Build a token stream from the lexer tagger for this snapshot.
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, GrammarParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
    //        DocumentSnapshotCharStream input = new DocumentSnapshotCharStream(snapshot);
    //        input.setSourceName((String)document.getDocument().getProperty(Document.TitleProperty));
    //        GrammarLexer lexer = new GrammarLexer(input);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            GrammarSpecContext parseResult;
            GrammarParser parser = GrammarParserFactory.DEFAULT.getParser(tokenStream);
            try {
                // Fast path: SLL prediction plus bail-on-first-error.
                parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
                parser.removeErrorListeners();
                parser.setBuildParseTree(true);
                parser.setErrorHandler(new BailErrorStrategy());
                parseResult = parser.grammarSpec();
            } catch (ParseCancellationException ex) {
                if (ex.getCause() instanceof RecognitionException) {
                    // retry with default error handler
                    tokenStream.reset();
                    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
                    parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
                    parser.setInputStream(tokenStream);
                    parser.setErrorHandler(new DefaultErrorStrategy());
                    parseResult = parser.grammarSpec();
                } else {
                    throw ex;
                }
            }

            parseTreeResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);

            // Derive anchor points by walking the fresh parse tree.
            if (anchorPointsResult == null && snapshot.getVersionedDocument().getDocument() != null) {
                GrammarParserAnchorListener listener = new GrammarParserAnchorListener(snapshot);
                ParseTreeWalker.DEFAULT.walk(listener, parseResult);
                anchorPointsResult = new BaseParserData<>(context, GrammarParserDataDefinitions.REFERENCE_ANCHOR_POINTS, snapshot, listener.getAnchors());
            }

            // Derive the file model (and refresh the code model cache) only
            // when the snapshot is backed by a file.
            if (fileModelResult == null) {
                FileModelImpl fileModel = null;
                if (snapshot.getVersionedDocument().getFileObject() != null) {
                    CodeModelBuilderListener codeModelBuilderListener = new CodeModelBuilderListener(snapshot, tokenStream);
                    ParseTreeWalker.DEFAULT.walk(codeModelBuilderListener, parseResult);
                    fileModel = codeModelBuilderListener.getFileModel();
                    if (fileModel != null) {
                        updateCodeModelCache(fileModel);
                    }
                }

                fileModelResult = new BaseParserData<>(context, GrammarParserDataDefinitions.FILE_MODEL, snapshot, fileModel);
            }
        }

        results.addResult(parseTreeResult);
        results.addResult(fileModelResult);
        if (anchorPointsResult != null) {
            results.addResult(anchorPointsResult);
        }
    }
}