package lab.chapter;

import java.io.CharArrayReader;
import java.io.IOException;
import java.util.*;

/**
 * Lexer driven automatically by a regular grammar.
 * How it works:
 * 1. Convert the lexical rules (regular expressions) into an NFA, then into a DFA.
 * 2. NFA states are tagged with their originating GrammarNode so the DFA can report
 *    which kind of Token was recognized. See regexToNFA in Regex.
 */
public class Lexer extends Regex {

    public static void main(String[] args) {
        GrammarNode lexerGrammar = GrammarSample.commonLexerGrammar();
        State[] nfaStates = regexToNFA(lexerGrammar);
        List<DFAState> dfaStates = NFA2DFA(nfaStates[0], CharSet.ascii);

        System.out.println("\ndump NFA:");
        nfaStates[0].dump();

        System.out.println("\ndump DFA:");
        dfaStates.get(0).dump();

        String code = "int i = 0; i + 100; if (a == 10) println(a); a <= b;";
        List<Token> tokens = tokenize(code, dfaStates.get(0), lexerGrammar);
        System.out.println("\nTokens");
        for (Token token : tokens) {
            System.out.println(token);
        }
    }

    /**
     * Tokenizes a script into a Token list using the common lexer grammar.
     *
     * @param script source text to tokenize
     * @return token list, terminated by Token.EOF
     */
    public static List<Token> tokenize(String script) {
        GrammarNode lexerGrammar = GrammarSample.commonLexerGrammar();
        State[] nfaStates = regexToNFA(lexerGrammar);
        List<DFAState> dfaStates = NFA2DFA(nfaStates[0], CharSet.ascii);
        List<Token> tokens = tokenize(script, dfaStates.get(0), lexerGrammar);
        // Append the end-of-input marker expected by downstream parsers.
        tokens.add(Token.EOF);
        return tokens;
    }

    /**
     * Core maximal-munch scanning loop:
     * 1. Feed characters to the DFA while a transition exists.
     * 2. On a dead end, emit a token if the current state is accepting.
     * 3. Restart from the start state and re-examine the unconsumed character.
     *
     * @param code       source text to tokenize
     * @param startState DFA start state
     * @param root       grammar root; its children define the token kinds
     * @return recognized tokens (no EOF marker is appended here)
     */
    private static List<Token> tokenize(String code, DFAState startState, GrammarNode root) {
        List<Token> tokens = new LinkedList<>();
        CharArrayReader reader = new CharArrayReader(code.toCharArray());
        DFAState currentState = startState;
        DFAState nextState;
        int ich;
        char ch;
        StringBuilder tokenText = new StringBuilder();
        try {
            // The second condition keeps the loop alive at EOF so the final pending
            // token gets flushed: (char) -1 is '\uFFFF', which has no ASCII
            // transition, forcing the emit path below.
            while ((ich = reader.read()) != -1  || tokenText.length() > 0) {
                ch = (char) ich;
                boolean consumed = false;
                while (!consumed) {
                    nextState = currentState.getNextState(ch);
                    if (nextState == null) {
                        if(currentState == startState) {
                            // Unrecognized character at the start of a token: skip it.
                            consumed = true;
                        } else if (currentState.isAcceptable()) {
                            GrammarNode grammarNode = getGrammar(currentState, root);
                            assert grammarNode != null;
                            if (!grammarNode.isNeglect()) {
                                Token token = new Token(grammarNode.getName(), tokenText.toString());
                                tokens.add(token);
                            }
                            tokenText = new StringBuilder();
                            // Restart matching; the current character is retried
                            // from the start state on the next inner iteration.
                            currentState = startState;
                        } else {
                            // Dead end in a non-accepting state: the buffered text can
                            // never form a token. Discard it and restart from the start
                            // state, retrying the current character. Without this reset
                            // an unrecognizable trailing fragment spins forever at EOF
                            // (tokenText stays non-empty and the state never changes).
                            tokenText = new StringBuilder();
                            currentState = startState;
                        }
                    } else {
                        // Transition succeeded: extend the current token.
                        currentState = nextState;
                        tokenText.append(ch);
                        consumed = true;
                    }
                }
            }
        } catch (IOException e) {
            // CharArrayReader only throws after close(), so this is effectively unreachable.
            e.printStackTrace();
        }
        return tokens;
    }

    /**
     * Maps an accepting DFA state back to the lexical rule it recognizes by
     * inspecting the GrammarNode tags on its underlying NFA states.
     * When several rules match (e.g. keyword vs. identifier overlap), the rule
     * declared earliest under the grammar root wins.
     *
     * @param state accepting DFA state
     * @param root  grammar root whose child order defines rule priority
     * @return the matching rule, or null if no underlying NFA state is tagged
     */
    private static GrammarNode getGrammar(DFAState state, GrammarNode root) {
        Set<GrammarNode> validGrammars = new HashSet<>();
        for (State child : state.states()) {
            if (child.getGrammarNode() != null) {
                validGrammars.add(child.getGrammarNode());
            }
        }

        // Iterate rules in declaration order: earlier rules have higher priority.
        GrammarNode rtn = null;
        for (GrammarNode grammarNode : root.children()) {
            if (grammarNode.getName() != null) {
                if (validGrammars.contains(grammarNode)) {
                    rtn = grammarNode;
                    break;
                }
            }
        }
        return rtn;
    }
}
