Java class org.antlr.v4.runtime.atn.ATNSerializer — example usage source code

项目:codebuff    文件:Grammar.java   
/**
 * Creates a {@link LexerInterpreter} for this grammar's tokenizer.
 *
 * @param input the character stream the interpreter will tokenize
 * @return a lexer interpreter driven by an independent copy of this grammar's ATN
 * @throws IllegalStateException if this grammar is a pure parser grammar
 */
public LexerInterpreter createLexerInterpreter(CharStream input) {
    if (isParser()) {
        throw new IllegalStateException("A lexer interpreter can only be created for a lexer or combined grammar.");
    }
    if (isCombined()) {
        // A combined grammar delegates to its implicitly generated lexer grammar.
        return implicitLexer.createLexerInterpreter(input);
    }
    // Round-trip through serialization so the interpreter owns an independent ATN copy.
    char[] serialized = ATNSerializer.getSerializedAsChars(atn);
    ATN atnCopy = new ATNDeserializer().deserialize(serialized);
    return new LexerInterpreter(fileName,
                                getVocabulary(),
                                Arrays.asList(getRuleNames()),
                                ((LexerGrammar) this).modes.keySet(),
                                atnCopy,
                                input);
}
项目:codebuff    文件:Grammar.java   
/**
 * Creates a {@link GrammarParserInterpreter} for this grammar.
 *
 * @param tokenStream the token stream the interpreter will parse
 * @return a parser interpreter driven by an independent copy of this grammar's ATN
 * @throws IllegalStateException if this grammar is a pure lexer grammar
 * @since 4.5.1
 */
public GrammarParserInterpreter createGrammarParserInterpreter(TokenStream tokenStream) {
    if (isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }
    // Round-trip through serialization so the interpreter owns an independent ATN copy.
    ATN atnCopy = new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(atn));
    return new GrammarParserInterpreter(this, atnCopy, tokenStream);
}
项目:codebuff    文件:Grammar.java   
/**
 * Creates a generic {@link ParserInterpreter} for this grammar.
 *
 * @param tokenStream the token stream the interpreter will parse
 * @return a parser interpreter driven by an independent copy of this grammar's ATN
 * @throws IllegalStateException if this grammar is a pure lexer grammar
 */
public ParserInterpreter createParserInterpreter(TokenStream tokenStream) {
    if (isLexer()) {
        throw new IllegalStateException("A parser interpreter can only be created for a parser or combined grammar.");
    }
    // Round-trip through serialization so the interpreter owns an independent ATN copy.
    ATN atnCopy = new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(atn));
    return new ParserInterpreter(fileName,
                                 getVocabulary(),
                                 Arrays.asList(getRuleNames()),
                                 atnCopy,
                                 tokenStream);
}
项目:codebuff    文件:SerializedATN.java   
/**
 * Captures the serialized form of an ATN as a list of target-language
 * character escapes, ready for emission into generated code.
 *
 * @param factory the output-model factory supplying the code-gen target
 * @param atn     the ATN to serialize
 */
public SerializedATN(OutputModelFactory factory, ATN atn) {
    super(factory);
    IntegerList data = ATNSerializer.getSerialized(atn);
    serialized = new ArrayList<String>(data.size());
    // Iterate the IntegerList directly; toArray() would allocate a throwaway copy.
    for (int i = 0; i < data.size(); i++) {
        int c = data.get(i);
        // -1 (EOF marker) does not fit in a char, so it is encoded as \uFFFF.
        String encoded = factory.getGenerator().getTarget()
                .encodeIntAsCharEscape(c == -1 ? Character.MAX_VALUE : c);
        serialized.add(encoded);
    }
}
项目:goworks    文件:ParserInterpreterData.java   
/**
 * Builds parser-interpreter data from a document snapshot by parsing and
 * processing the grammar text it contains.
 *
 * @param snapshot the versioned document snapshot holding the grammar source
 * @return the populated interpreter data, or {@code null} if the lexer data
 *         could not be built, the grammar failed to parse, or the snapshot
 *         holds a pure lexer grammar
 */
public static ParserInterpreterData buildFromSnapshot(DocumentSnapshot snapshot) {
    LexerInterpreterData lexerInterpreterData = LexerInterpreterData.buildFromSnapshot(snapshot);
    if (lexerInterpreterData == null) {
        return null;
    }

    List<SyntaxError> syntaxErrors = new ArrayList<>();
    Tool tool = new CustomTool(snapshot);
    tool.errMgr = new CustomErrorManager(tool);
    tool.addListener(new ErrorListener(snapshot, tool, syntaxErrors));
    // Resolve token-vocab imports relative to the grammar file's directory.
    tool.libDirectory = new File(snapshot.getVersionedDocument().getFileObject().getPath()).getParent();

    ANTLRStringStream stream = new ANTLRStringStream(snapshot.getText());
    stream.name = snapshot.getVersionedDocument().getFileObject().getNameExt();
    GrammarRootAST ast = tool.parse(stream.name, stream);
    if (ast == null) {
        // Tool.parse returns null on unrecoverable syntax errors; bail out
        // instead of passing null into createGrammar (NPE).
        return null;
    }

    Grammar grammar = tool.createGrammar(ast);
    if (grammar == null || grammar instanceof LexerGrammar) {
        // No usable parser grammar: creation failed or this is a pure lexer grammar.
        return null;
    }

    tool.process(grammar, false);

    ParserInterpreterData data = new ParserInterpreterData();

    // start by filling in the lexer data
    data.lexerInterpreterData = lexerInterpreterData;

    // then fill in the parser data
    data.grammarFileName = grammar.fileName;
    data.serializedAtn = ATNSerializer.getSerializedAsString(grammar.atn, Arrays.asList(grammar.getRuleNames()));
    data.vocabulary = grammar.getVocabulary();
    data.ruleNames = new ArrayList<>(grammar.rules.keySet());

    return data;
}
项目:antlrworks2    文件:ParserInterpreterData.java   
/**
 * Builds parser-interpreter data from a document snapshot by parsing and
 * processing the grammar text it contains.
 *
 * @param snapshot the versioned document snapshot holding the grammar source
 * @return the populated interpreter data, or {@code null} if the lexer data
 *         could not be built, the grammar failed to parse, or the snapshot
 *         holds a pure lexer grammar
 */
public static ParserInterpreterData buildFromSnapshot(DocumentSnapshot snapshot) {
    LexerInterpreterData lexerInterpreterData = LexerInterpreterData.buildFromSnapshot(snapshot);
    if (lexerInterpreterData == null) {
        return null;
    }

    List<SyntaxError> syntaxErrors = new ArrayList<>();
    Tool tool = new CustomTool(snapshot);
    tool.errMgr = new CustomErrorManager(tool);
    tool.addListener(new ErrorListener(snapshot, tool, syntaxErrors));
    // Resolve token-vocab imports relative to the grammar file's directory.
    tool.libDirectory = new File(snapshot.getVersionedDocument().getFileObject().getPath()).getParent();

    ANTLRStringStream stream = new ANTLRStringStream(snapshot.getText());
    stream.name = snapshot.getVersionedDocument().getFileObject().getNameExt();
    GrammarRootAST ast = tool.parse(stream.name, stream);
    if (ast == null) {
        // Tool.parse returns null on unrecoverable syntax errors; bail out
        // instead of passing null into createGrammar (NPE).
        return null;
    }

    Grammar grammar = tool.createGrammar(ast);
    if (grammar == null || grammar instanceof LexerGrammar) {
        // No usable parser grammar: creation failed or this is a pure lexer grammar.
        return null;
    }

    tool.process(grammar, false);

    ParserInterpreterData data = new ParserInterpreterData();

    // start by filling in the lexer data
    data.lexerInterpreterData = lexerInterpreterData;

    // then fill in the parser data
    data.grammarFileName = grammar.fileName;
    data.serializedAtn = ATNSerializer.getSerializedAsString(grammar.atn, Arrays.asList(grammar.getRuleNames()));
    data.vocabulary = grammar.getVocabulary();
    data.ruleNames = new ArrayList<>(grammar.rules.keySet());

    return data;
}
项目:intellij-plugin-v4    文件:PreviewParser.java   
// Convenience constructor: serializes the grammar's ATN and immediately
// deserializes it so the preview parser runs on an independent ATN copy,
// then delegates to the main constructor. (No restyle possible: this(...)
// must be the first and only statement.)
public PreviewParser(Grammar g, TokenStream input) {
    this(g, new ATNDeserializer().deserialize(ATNSerializer.getSerializedAsChars(g.getATN())), input);
}