Example usages of the Java class org.antlr.v4.runtime.atn.ATNDeserializer
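All of the examples below revolve around one pattern: reconstructing a live ATN (augmented transition network) from its serialized form with new ATNDeserializer().deserialize(...). As a minimal, self-contained sketch of that pattern (MyLexer is a hypothetical ANTLR 4 generated lexer; generated recognizers expose their serialized ATN in the static _serializedATN field, and the char[] overload shown here is the pre-4.10 API used throughout these examples):

import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNDeserializer;

// Rebuild the ATN that was embedded in the generated recognizer.
char[] serialized = MyLexer._serializedATN.toCharArray();
ATN atn = new ATNDeserializer().deserialize(serialized);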

Project: Scratch-ApuC    File: Parser.java
/**
 * The ATN with bypass alternatives is expensive to create so we create it
 * lazily.
 *
 * @throws UnsupportedOperationException if the current parser does not
 * implement the {@link #getSerializedATN()} method.
 */
@NotNull
public ATN getATNWithBypassAlts() {
    String serializedAtn = getSerializedATN();
    if (serializedAtn == null) {
        throw new UnsupportedOperationException("The current parser does not support an ATN with bypass alternatives.");
    }

    synchronized (bypassAltsAtnCache) {
        ATN result = bypassAltsAtnCache.get(serializedAtn);
        if (result == null) {
            ATNDeserializationOptions deserializationOptions = new ATNDeserializationOptions();
            deserializationOptions.setGenerateRuleBypassTransitions(true);
            result = new ATNDeserializer(deserializationOptions).deserialize(serializedAtn.toCharArray());
            bypassAltsAtnCache.put(serializedAtn, result);
        }

        return result;
    }
}
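The bypass ATN built above is what powers parse-tree pattern matching. A hedged usage sketch (the parser instance, MyParser.RULE_stat, and tree are assumptions for illustration, not part of the snippet above):

import org.antlr.v4.runtime.tree.pattern.ParseTreePattern;

// Compiling a tree pattern calls getATNWithBypassAlts() under the hood.
ParseTreePattern pattern = parser.compileParseTreePattern("<ID> = <expr>;", MyParser.RULE_stat);
boolean matched = pattern.matches(tree);  // test a previously built parse tree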
Project: codebuff    File: Grammar.java
public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (this.isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }

    if (this.isCombined()) {
        return implicitLexer.createLexerInterpreter(input);
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new LexerInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), ((LexerGrammar)this).modes.keySet(), deserialized, input);
}
Project: codebuff    File: Grammar.java
/** @since 4.5.1 */
public GrammarParserInterpreter createGrammarParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }
    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new GrammarParserInterpreter(this, deserialized, tokenStream);
}
Project: codebuff    File: Grammar.java
public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
    if (this.isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }

    char[] serializedAtn = ATNSerializer.getSerializedAsChars(atn);
    ATN deserialized = new ATNDeserializer().deserialize(serializedAtn);
    return new ParserInterpreter(fileName, getVocabulary(), Arrays.asList(getRuleNames()), deserialized, tokenStream);
}
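Together, these Grammar factory methods let a grammar loaded at runtime be lexed and parsed entirely in interpreted mode. A sketch of how they might be combined (the grammar file Expr.g4 and the start rule expr are assumptions; Grammar here is the ANTLR tool class org.antlr.v4.tool.Grammar):

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.LexerInterpreter;
import org.antlr.v4.runtime.ParserInterpreter;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.tool.Grammar;

// Interpret a combined grammar without generating any code.
Grammar g = Grammar.load("Expr.g4");
LexerInterpreter lexer = g.createLexerInterpreter(CharStreams.fromString("1 + 2"));
CommonTokenStream tokens = new CommonTokenStream(lexer);
ParserInterpreter parser = g.createParserInterpreter(tokens);
ParseTree tree = parser.parse(g.getRule("expr").index);  // parse from the assumed start rule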
Project: goworks    File: AbstractGrammarDebuggerEditorKit.java
private void loadTokens(final Document document, LexerInterpreterData interpreterData, LexerTraceAnalyzer analyzer) {
    try {
        TracingCharStream charStream = new TracingCharStream(analyzer, document.getText(0, document.getLength()));
        TracingLexer lexer = new TracingLexer(interpreterData, analyzer, charStream);
        ATN atn = new ATNDeserializer().deserialize(interpreterData.serializedAtn.toCharArray());
        TracingLexerATNSimulator atnSimulator = new TracingLexerATNSimulator(analyzer, lexer, atn);
        lexer.setInterpreter(atnSimulator);
        CommonTokenStream commonTokenStream = new CommonTokenStream(lexer);
        commonTokenStream.fill();
    } catch (BadLocationException ex) {
        Exceptions.printStackTrace(ex);
    }
}
Project: goworks    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Project: goworks    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Project: antlrworks2    File: AbstractGrammarDebuggerEditorKit.java
private void loadTokens(final Document document, LexerInterpreterData interpreterData, LexerTraceAnalyzer analyzer) {
    try {
        TracingCharStream charStream = new TracingCharStream(analyzer, document.getText(0, document.getLength()));
        TracingLexer lexer = new TracingLexer(interpreterData, analyzer, charStream);
        ATN atn = new ATNDeserializer().deserialize(interpreterData.serializedAtn.toCharArray());
        TracingLexerATNSimulator atnSimulator = new TracingLexerATNSimulator(analyzer, lexer, atn);
        lexer.setInterpreter(atnSimulator);
        CommonTokenStream commonTokenStream = new CommonTokenStream(lexer);
        commonTokenStream.fill();
    } catch (BadLocationException ex) {
        Exceptions.printStackTrace(ex);
    }
}
Project: antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSourceWithStateV4<SimpleLexerState> createLexer(CharStream input, SimpleLexerState startState) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    ParserDebuggerLexerWrapper lexer = new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, input);
    startState.apply(lexer);
    return lexer;
}
Project: antlrworks2    File: ParserDebuggerTokensTaskTaggerSnapshot.java
@Override
protected TokenSource getEffectiveTokenSource(TokenSourceWithStateV4<SimpleLexerState> lexer) {
    ATN atn = new ATNDeserializer().deserialize(lexerInterpreterData.serializedAtn.toCharArray());
    Vocabulary vocabulary = lexerInterpreterData.vocabulary;
    String grammarFileName = lexerInterpreterData.grammarFileName;
    List<String> ruleNames = lexerInterpreterData.ruleNames;
    List<String> modeNames = lexerInterpreterData.modeNames;
    return new ParserDebuggerLexerWrapper(grammarFileName, vocabulary, ruleNames, modeNames, atn, lexer.getInputStream());
}
Project: goworks    File: ParserDebuggerReferenceAnchorsParserTask.java
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;

            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);

            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);

            String sourceName = (String)snapshot.getVersionedDocument().getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }

        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}
Project: intellij-plugin-v4    File: PreviewParser.java
public PreviewParser(Grammar g, TokenStream input) {
    this(g, new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(g.getATN())), input);
}
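Note the serialize-then-deserialize round trip: instead of handing the interpreter g.getATN() directly, the constructor passes a freshly deserialized copy. The ANTLR sources note that running the tool-built ATN through the serializer sets some state flags the runtime expects, and it also gives the preview parser its own ATN instance, independent of the one held by the Grammar.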
Project: antlrworks2    File: ParserDebuggerReferenceAnchorsParserTask.java
@Override
public void parse(ParserTaskManager taskManager, ParseContext context, DocumentSnapshot snapshot, Collection<? extends ParserDataDefinition<?>> requestedData, ParserResultHandler results)
    throws InterruptedException, ExecutionException {

    //ParserDebuggerEditorKit.LEX
    synchronized (lock) {
        ParserData<FileParseResult> fileParseResultData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        ParserData<ParserRuleContext> parseTreeResult = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, EnumSet.of(ParserDataOptions.NO_UPDATE)).get();
        if (fileParseResultData == null || parseTreeResult == null) {
            Future<ParserData<Tagger<TokenTag<Token>>>> futureTokensData = taskManager.getData(snapshot, ParserDebuggerParserDataDefinitions.LEXER_TOKENS);
            Tagger<TokenTag<Token>> tagger = futureTokensData.get().getData();
            TaggerTokenSource tokenSource = new TaggerTokenSource(tagger, snapshot);
            InterruptableTokenStream tokenStream = new InterruptableTokenStream(tokenSource);
            ParserRuleContext parseResult;

            ParserInterpreterData parserInterpreterData = (ParserInterpreterData)snapshot.getVersionedDocument().getDocument().getProperty(ParserDebuggerEditorKit.PROP_PARSER_INTERP_DATA);
            String grammarFileName = parserInterpreterData.grammarFileName;
            Vocabulary vocabulary = parserInterpreterData.vocabulary;
            List<String> ruleNames = parserInterpreterData.ruleNames;
            ATN atn = new ATNDeserializer().deserialize(parserInterpreterData.serializedAtn.toCharArray());
            TracingParserInterpreter parser = new TracingParserInterpreter(grammarFileName, vocabulary, ruleNames, atn, tokenStream);

            long startTime = System.nanoTime();
            parser.setInterpreter(new StatisticsParserATNSimulator(parser, atn));
            parser.getInterpreter().optimize_ll1 = false;
            parser.getInterpreter().reportAmbiguities = true;
            parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION);
            parser.removeErrorListeners();
            parser.addErrorListener(DescriptiveErrorListener.INSTANCE);
            parser.addErrorListener(new StatisticsParserErrorListener());
            SyntaxErrorListener syntaxErrorListener = new SyntaxErrorListener(snapshot);
            parser.addErrorListener(syntaxErrorListener);
            parser.setBuildParseTree(true);
            parser.setErrorHandler(new DefaultErrorStrategy());
            parseResult = parser.parse(parserInterpreterData.startRuleIndex);

            String sourceName = (String)snapshot.getVersionedDocument().getDocument().getProperty(Document.TitleProperty);
            FileParseResult fileParseResult = new FileParseResult(sourceName, 0, parseResult, syntaxErrorListener.getSyntaxErrors(), tokenStream.size(), startTime, null, parser);
            fileParseResultData = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.FILE_PARSE_RESULT, snapshot, fileParseResult);
            parseTreeResult = new BaseParserData<>(context, ParserDebuggerParserDataDefinitions.REFERENCE_PARSE_TREE, snapshot, parseResult);
        }

        results.addResult(fileParseResultData);
        results.addResult(parseTreeResult);
    }
}