package lab.chapter;


import java.util.*;

// TODO need to fix bug
public class LRParser {
    public static void main(String[] args) {
        // Example 1 — grammar:
        //   add : mul | add '+' mul;
        //   mul : pri | mul '*' pri;
        //   pri : INT_LITERAL | LPAREN add RPAREN;
        String script1 = "2+3*(4+5)";
        ASTNode ast = parse(script1, GrammarSample.simpleLeftRecursiveExpressionGrammar());
        ast.dump();

        // Example 2 — grammar:
        //   expression : add;
        //   add : mul | add ('+' | '-') mul;
        //   mul : pri | mul ('*' | '/') pri;
        //   pri : ID | INT_LITERAL | LPAREN expression RPAREN;
        String script2 = "a-3/(4+5)";
        ast = parse(script2, GrammarSample.leftRecursiveExpressionGrammar());
        ast.dump();

        // Example 3 — grammar:
        //   expression : assign;
        //   assign : equal | assign '=' equal;
        //   equal : rel | equal ('==' | '!=') rel;
        //   rel : add | rel ('>=' | '>' | '<=' | '<') add;
        //   add : mul | add ('+' | '-') mul;
        //   mul : pri | mul ('*' | '/') pri;
        //   pri : ID | INT_LITERAL | LPAREN expression RPAREN;
        String script3 = "a = 2 - 3 > (4+5)";
        ast = parse(script3, GrammarSample.fullLeftRecursiveExpressionGrammar());
        ast.dump();
    }

    /**
     * Parses a script into an AST using the given grammar.
     *
     * @param script      the script to parse
     * @param grammarNode entry node of the grammar
     * @return root AST node of the parsed script
     */
    public static ASTNode parse(String script, GrammarNode grammarNode) {
        grammarNode.dump();

        // Translate the grammar into an NFA of LR items.
        List<GrammarNode> allNodes = new LinkedList<>();
        GrammarNFAState nfaStart = grammar2NFA(grammarNode, allNodes);
        nfaStart.dump();

        // Collect the names of all named elements: non-terminals and terminals.
        List<String> grammarNames = new LinkedList<>();
        // tokenNames is collected but not consumed yet — presumably reserved for
        // the grammar-validation TODO below.
        List<String> tokenNames = new LinkedList<>();
        for (GrammarNode node : allNodes) {
            String name = node.getGrammarName();
            if (name == null) {
                continue;
            }
            grammarNames.add(name);
            if (node.isToken()) {
                tokenNames.add(name);
            }
        }

        // Compute and print the epsilon-closure of every NFA state.
        Map<State, Set<State>> closures = calcClosure(nfaStart);
        for (Map.Entry<State, Set<State>> entry : closures.entrySet()) {
            System.out.println("Closure " + entry.getKey().getName() + " -> ");
            for (State member : entry.getValue()) {
                System.out.print(" " + member.getName());
            }
            System.out.println();
        }

        // Convert the NFA into a DFA; the first state is the DFA start state.
        List<DFAState> dfaStates = NFA2DFA(nfaStart, grammarNames, closures);
        dfaStates.get(0).dump();

        // TODO validate that the grammar is well-formed

        // Lexical analysis.
        TokenReader tokenReader = new TokenReader(Lexer.tokenize(script));

        // Syntactic analysis via shift-reduce.
        return shiftReduce(new Stack<ASTNode>(), tokenReader, dfaStates.get(0));
    }

    /**
     * Translates the grammar into an NFA of LR items.
     *
     * @param grammarNode entry node of the grammar
     * @param allNodes    out-parameter: filled with every node reachable from
     *                    the synthetic start node
     * @return the NFA state of the synthetic "start" production, or null if
     *         no such production was generated
     */
    public static GrammarNFAState grammar2NFA(GrammarNode grammarNode, List<GrammarNode> allNodes) {
        // 1. Add a synthetic start node so the NFA has a single entry point.
        GrammarNode start = new GrammarNode("start", GrammarNodeType.And);
        start.addChild(grammarNode);

        // 2. Convert the GrammarNode tree into productions, which are simpler to process.
        // 2.1 Collect all terminals and non-terminals.
        allNodes.addAll(start.getAllNodes());

        // 2.2 Build a name -> GrammarNode lookup table.
        Map<String, GrammarNode> nodes = new HashMap<>();
        for (GrammarNode node : allNodes) {
            System.out.print(node.getName() + " ");
            System.out.println(node.getText());
            nodes.put(node.getName(), node);
        }

        // 2.3 Generate one or more productions for every named node.
        // e.g. add -> add + mul | mul splits into: add -> add + mul and add -> mul.
        // The rhs may still contain composite parts, e.g. (+ | -) in add -> add (+|-) mul.
        List<Production> productions = new LinkedList<>();
        generateProduction(nodes, productions);

        // 2.4 Expand Or nodes on the rhs into fully simplified productions.
        // e.g. add -> add (+ | -) mul becomes: add -> add + mul and add -> add - mul.
        simplifyProductions(nodes, productions);

        for (Production production : productions) {
            System.out.println(production);
        }

        // 3. Build the NFA from the productions.
        // 3.1 Create a sub-graph per production whose states move the item dot right:
        // add -> .add + mul => add -> add. + mul => add -> add + mul.
        Map<Production, GrammarNFAState> subGraphs = new HashMap<>();
        List<GrammarNFAState> states = new LinkedList<>();
        calcSubGraphs(productions, subGraphs, states);

        // 3.2 Connect the sub-graphs via epsilon transitions. e.g. add -> add + .mul
        // links to both mul -> .mul * pri and mul -> .pri.
        linkSubGraphs(subGraphs, states);

        // Find the state of the synthetic start production.
        GrammarNFAState rootState = null;
        for (Production production : productions) {
            if (production.lhs.equals("start")) {
                rootState = subGraphs.get(production);
                // "start" is an And node, so there is exactly one such production;
                // the original kept scanning pointlessly after the match.
                break;
            }
        }

        return rootState;
    }

    /**
     * Computes the epsilon-closure of every state reachable from {@code state}.
     *
     * @param state the start state
     * @return a map from each reachable state to its closure set
     */
    private static Map<State, Set<State>> calcClosure(State state) {
        Map<State, Set<State>> closures = new HashMap<>();

        // Run passes until a fixed point is reached. The previous implementation
        // ran at most two passes, which is not guaranteed to converge: each pass
        // only propagates closures one epsilon hop further along chains that are
        // visited in an unfavorable order. Termination is guaranteed because the
        // closure sets only grow and are bounded by the finite set of states.
        int round = 1;
        boolean stable;
        do {
            System.out.println("calcClosure round : " + round++);
            stable = calcClosure(state, closures, new HashSet<>());
        } while (!stable);
        return closures;
    }

    /**
     * One depth-first pass of the closure computation.
     * A state's closure contains the state itself plus the closures of all
     * states reachable from it via a single epsilon transition; repeated
     * passes propagate closures along longer epsilon chains.
     *
     * @param state      state to process
     * @param closures   accumulated closure sets, mutated in place
     * @param calculated states already visited in this pass (guards against cycles)
     * @return true if this pass changed nothing, i.e. a fixed point was reached
     */
    private static  boolean calcClosure(State state, Map<State, Set<State>> closures, Set<State> calculated) {
        calculated.add(state);
        // Fetch (or lazily create) this state's closure set.
        Set<State> closure = null;
        if (closures.containsKey(state)) {
            closure = closures.get(state);
        } else {
            closure = new HashSet<>();
            closures.put(state, closure);
        }

        boolean stable = true;

        // Every state belongs to its own closure.
        if (!closure.contains(state)) {
            closure.add(state);
            stable = false;
        }
        // Remember epsilon successors; their closures are merged in below,
        // after all successors have been recursed into.
        List<State> toAdd = new LinkedList<>();
        for (Transition transition : state.transitions()) {
            State nextState = state.getState(transition);
            if (transition.isEpsilon()) {
                toAdd.add(nextState);
            }
            // Recurse into every successor not yet visited in this pass.
            boolean childStable;
            if (!calculated.contains(nextState)) {
                childStable = calcClosure(nextState, closures, calculated);
                if (!childStable) {
                    stable = false;
                }
            }
        }

        // Merge the closures of the epsilon successors into this state's closure.
        for (State state1 : toAdd) {
            Set<State> closure1 = closures.get(state1);
            if (!closure.containsAll(closure1)) {
                closure.addAll(closure1);
                stable = false;
            }
        }
        return stable;
    }

    /**
     * Converts the NFA into a DFA via subset construction.
     *
     * @param startState   start state of the NFA
     * @param grammarNames every symbol name, terminals and non-terminals alike
     * @param closures     pre-computed epsilon-closures of all NFA states
     * @return all DFA states; the first element is the DFA start state
     */
    protected static List<DFAState> NFA2DFA(State startState, List<String> grammarNames, Map<State, Set<State>> closures) {
        List<DFAState> dfaStates = new LinkedList<>();

        // Seed with the closure of the NFA start state.
        DFAState start = new DFAState(closures.get(startState));
        dfaStates.add(start);

        List<DFAState> frontier = new LinkedList<>();
        frontier.add(start);

        // Each pass may discover new state sets; stop once a pass finds none.
        while (!frontier.isEmpty()) {
            List<DFAState> current = frontier;
            frontier = new LinkedList<>();

            for (DFAState from : current) {
                // Probe every symbol from this DFA state.
                for (String grammarName : grammarNames) {
                    Set<State> moved = move(from.states(), grammarName);
                    if (moved.isEmpty()) {
                        continue;
                    }
                    // Extend with the epsilon-closure of every reached NFA state.
                    addClosure(moved, closures);

                    // Reuse an existing DFA state covering the same NFA set, if any.
                    DFAState to = findDFAState(dfaStates, moved);
                    if (to == null) {
                        to = new DFAState(moved);
                        dfaStates.add(to);
                        frontier.add(to);
                    }
                    from.addTransition(new GrammarTransition(grammarName), to);
                }
            }
        }
        return dfaStates;
    }

    /**
     * Computes the set of NFA states reachable from {@code states} by
     * consuming one symbol named {@code grammarName}.
     *
     * @param states      source NFA states
     * @param grammarName name of the symbol to consume
     * @return the reached states (possibly empty)
     */
    private static Set<State> move(Set<State> states, String grammarName) {
        Set<State> result = new HashSet<>();
        for (State from : states) {
            for (Transition transition : from.transitions()) {
                if (!transition.match(grammarName)) {
                    continue;
                }
                result.add(from.getState(transition));
            }
        }
        return result;
    }

    /**
     * Expands a state set in place with the pre-computed epsilon-closure of
     * each of its members.
     *
     * @param states             the set to expand (mutated)
     * @param calculatedClosures closures computed earlier by calcClosure
     */
    private static void addClosure(Set<State> states, Map<State, Set<State>> calculatedClosures) {
        Set<State> expansion = new HashSet<>();
        for (State member : states) {
            Set<State> closure = calculatedClosures.get(member);
            if (closure == null) {
                // Every reachable state should have a closure; report and keep going.
                System.out.println("error: closure is null");
                continue;
            }
            expansion.addAll(closure);
        }
        // Added after the loop to avoid mutating the set while iterating it.
        states.addAll(expansion);
    }

    /**
     * Finds an existing DFAState backed by exactly the given NFA state set.
     *
     * @param dfaStates all DFA states built so far
     * @param states    the NFA state set to look up
     * @return the matching DFAState, or null if none exists
     */
    private static DFAState findDFAState(List<DFAState> dfaStates, Set<State> states) {
        for (DFAState candidate : dfaStates) {
            if (sameStateSet(candidate.states(), states)) {
                return candidate;
            }
        }
        return null;
    }

    /** Returns true when the two NFA state sets contain exactly the same states. */
    private static boolean sameStateSet(Set<State> stateSet1, Set<State> stateSet2) {
        return stateSet1.size() == stateSet2.size() && stateSet1.containsAll(stateSet2);
    }

    /**
     * Generates one or more productions for every named grammar node.
     * e.g. add -> add + mul | mul splits into: add -> add + mul and add -> mul.
     * The rhs may still contain composite parts needing further expansion,
     * e.g. (+ | -) in add -> add (+ | -) mul.
     *
     * @param nodes       all grammar nodes, indexed by name
     * @param productions output list the generated productions are appended to
     */
    private static void generateProduction(Map<String, GrammarNode> nodes, List<Production> productions) {
        for (Map.Entry<String, GrammarNode> entry : nodes.entrySet()) {
            GrammarNode node = entry.getValue();
            if (!node.isNamedNode()) {
                continue;
            }
            if (node.getType() == GrammarNodeType.Or) {
                // One production per alternative: lhs -> child.
                for (GrammarNode alternative : node.children()) {
                    Production p = new Production();
                    p.lhs = node.getName();
                    p.rhs.add(alternative.getName());
                    productions.add(p);
                }
            } else if (node.getType() == GrammarNodeType.And) {
                // A single production whose rhs is the whole child sequence.
                Production p = new Production();
                p.lhs = node.getName();
                for (GrammarNode part : node.children()) {
                    p.rhs.add(part.getName());
                }
                productions.add(p);
            }
        }
    }

    /**
     * Expands composite rhs symbols until every production is maximally simple.
     * e.g. add -> add (+ | -) mul becomes two productions:
     * add -> add + mul and add -> add - mul.
     * Runs in rounds until no production needs further rewriting.
     *
     * @param nodes       all grammar nodes, indexed by name
     * @param productions the production list, rewritten in place
     */
    private static void simplifyProductions(Map<String, GrammarNode> nodes, List<Production> productions) {
        boolean modified = true;

        int round = 1;
        while (modified) {
            System.out.println("round: " + round++);
            List<Production> toRemove = new LinkedList<>();
            List<Production> newProductions = new LinkedList<>();
            for (Production production : productions) {
                for (int i = 0; i < production.rhs.size(); i++) {
                    GrammarNode node = nodes.get(production.rhs.get(i));
                    if (node == null || node.isNamedNode()) {
                        continue;
                    }
                    GrammarNodeType type = node.getType();
                    if (type == GrammarNodeType.Or) {
                        toRemove.add(production);
                        // One new production per alternative of the Or node.
                        for (int j = 0; j < node.getChildCount(); j++) {
                            Production newProduction = beginRewrite(production, i);
                            addSymbol(newProduction, node.getChild(j));
                            finishRewrite(newProduction, production, i);
                            newProductions.add(newProduction);
                        }
                    } else if (type == GrammarNodeType.And) {
                        toRemove.add(production);
                        // Splice the And node's whole child sequence in place.
                        Production newProduction = beginRewrite(production, i);
                        for (int j = 0; j < node.getChildCount(); j++) {
                            addSymbol(newProduction, node.getChild(j));
                        }
                        finishRewrite(newProduction, production, i);
                        newProductions.add(newProduction);
                    } else if (type == GrammarNodeType.Token) {
                        toRemove.add(production);
                        // Replace the node name with the token's type name.
                        Production newProduction = beginRewrite(production, i);
                        newProduction.rhs.add(node.getToken().getType());
                        finishRewrite(newProduction, production, i);
                        newProductions.add(newProduction);
                    } else {
                        continue;
                    }
                    // Rewrite at most one rhs position per production per round.
                    // BUG FIX: the original Token branch fell through without
                    // breaking, so a production with two Token symbols was
                    // rewritten at both positions in the same round, emitting
                    // duplicate, partially-expanded productions.
                    break;
                }
            }
            modified = !toRemove.isEmpty();
            productions.removeAll(toRemove);
            productions.addAll(newProductions);
        }
    }

    /** Starts a rewrite of {@code production} at rhs position {@code i}: copies the lhs and rhs[0, i). */
    private static Production beginRewrite(Production production, int i) {
        Production newProduction = new Production();
        newProduction.lhs = production.lhs;
        for (int k = 0; k < i; k++) {
            newProduction.rhs.add(production.rhs.get(k));
        }
        return newProduction;
    }

    /** Appends a grammar child as an rhs symbol: token type for tokens, node name otherwise. */
    private static void addSymbol(Production production, GrammarNode child) {
        if (child.isToken()) {
            production.rhs.add(child.getToken().getType());
        } else {
            production.rhs.add(child.getName());
        }
    }

    /** Finishes a rewrite: copies rhs(i, end) of the original production. */
    private static void finishRewrite(Production newProduction, Production production, int i) {
        for (int k = i + 1; k < production.rhs.size(); k++) {
            newProduction.rhs.add(production.rhs.get(k));
        }
    }

    /**
     * Builds one sub-graph per production, in which the item dot advances
     * one symbol at a time from position 0 to the end of the rhs.
     *
     * @param productions the productions
     * @param subGraphs   out: maps each production to its entry state (dot at 0)
     * @param states      out: collects every created state
     */
    private static void calcSubGraphs(List<Production> productions, Map<Production, GrammarNFAState> subGraphs, List<GrammarNFAState> states) {
        for (Production production : productions) {
            // The dot-at-0 item is the sub-graph's entry state.
            GrammarNFAState current = new GrammarNFAState(new Item(production, 0));
            subGraphs.put(production, current);
            states.add(current);

            // Advance the dot one symbol at a time, linking consecutive states
            // with a transition labelled by the symbol being stepped over.
            for (int pos = 1; pos <= production.rhs.size(); pos++) {
                GrammarNFAState next = new GrammarNFAState(new Item(production, pos));
                states.add(next);
                current.addTransition(new GrammarTransition(production.rhs.get(pos - 1)), next);
                current = next;
            }
        }
    }

    /**
     * Connects the per-production sub-graphs with epsilon transitions.
     * e.g. from add -> add + .mul, two links are created: to mul -> .mul * pri
     * and to mul -> .pri.
     *
     * @param subGraphs entry state of each production's sub-graph
     * @param states    every NFA state
     */
    private static void linkSubGraphs(Map<Production, GrammarNFAState> subGraphs, List<GrammarNFAState> states) {
        for (GrammarNFAState state : states) {
            Item item = state.item;
            if (item.position >= item.production.rhs.size()) {
                continue; // dot at the end: nothing follows it
            }
            String expected = item.production.rhs.get(item.position);
            // Epsilon-link to the entry state of every production deriving the
            // symbol right after the dot.
            for (Map.Entry<Production, GrammarNFAState> entry : subGraphs.entrySet()) {
                if (entry.getKey().lhs.equals(expected)) {
                    state.addTransition(new GrammarTransition(), entry.getValue());
                }
            }
        }
    }

    /**
     * Parses the token stream with a shift-reduce loop driven by the DFA.
     *
     * @param stack       the parse stack (expected empty on entry)
     * @param tokenReader reader over the lexed tokens
     * @param startState  start state of the DFA
     * @return the root AST node (type "start"), or null on a parse error
     */
    private static ASTNode shiftReduce(Stack<ASTNode> stack, TokenReader tokenReader, DFAState startState) {
        Token token = tokenReader.peek();
        while (token != null) {
            boolean reduced = false;
            // Try to reduce first; reduce() may fold the stack several times.
            // (The original comment here mislabelled this call as a shift.)
            if (stack.size() > 0) {
                reduced = reduce(stack, token, startState);
            }
            // Shift: consume the token and push it as a leaf node.
            token = tokenReader.read();
            if (token != Token.EOF) {
                stack.push(new ASTNode(token.getType(), token.getText()));
            }
            // At EOF with no reduce performed, the parse cannot complete.
            if (!reduced && token == Token.EOF) {
                System.out.println("expecting reduce action before EOF");
                break;
            }

            // Refresh the lookahead.
            token = tokenReader.peek();
        }

        // A successful parse leaves exactly one node — the "start" node — on the stack.
        ASTNode rootNode = null;
        if (stack.size() == 1) {
            if (stack.get(0).getType().equals("start")) {
                rootNode = stack.get(0);
            } else {
                System.out.println("error, expecting the start node as root node");
            }
        } else {
            System.out.println("error, expecting 1 node in stack");
        }
        return rootNode;
    }

    /**
     * Finds the correct handle from the stack and the lookahead token, and reduces.
     * Success cases:
     * 1. possibly several reduces, after which nextToken matches the current handle
     * 2. the end marker $ (EOF) is reached
     * @param stack      the parse stack
     * @param nextToken  the lookahead token
     * @param startState start state of the DFA
     * @return true if at least one reduce was performed, false otherwise
     */
    private static boolean reduce(Stack<ASTNode> stack, Token nextToken, DFAState startState) {
        boolean reduced = false;

        // Replay the stack symbols through the DFA to find the current state.
        DFAState currentState = startState;
        for (ASTNode astNode : stack) {
            String grammarName = astNode.getText();
            currentState = currentState.getNextState(grammarName);
            assert currentState != null;
        }

        // No next state: we are already at start.
        // NOTE(review): with assertions enabled (-ea) the assert above fires
        // before this check can run — confirm which behavior is intended.
        if (currentState == null) {
            return false;
        }

        // Among the items of the current DFA state, choose the handle:
        // 1. prefer shifting — if any item can consume the lookahead, do not reduce.
        if (nextToken != Token.EOF) {
            for (State state : currentState.states()) {
                Item item = ((GrammarNFAState) state).item;
                String grammarName = item.getNextGrammarName();
                if (grammarName != null) {
                    if (nextToken.getType().equals(grammarName)) {
                        return false;
                    }
                }
            }
        }
        // 2. otherwise find an item with the dot at the end and reduce by it.
        for (State state : currentState.states()) {
            Item item = ((GrammarNFAState) state).item;
            if (item.atEnd()) {
                // Reduce to the grammar node named by the item's left-hand side.
                String grammarName = item.production.lhs;
                ASTNode node = new ASTNode(grammarName);
                reduced = true;

                // Attach the top rhs.size() stack entries as children, checking
                // each against the expected rhs symbol.
                int delta = stack.size() - item.production.rhs.size();
                for (int i = delta; i < stack.size(); i++) {
                    if (stack.get(i).getType().equals(item.production.rhs.get(i-delta))) {
                        node.addChild(stack.get(i));
                    } else {
                        System.out.println("error reducing for : " + item);
                    }
                }

                // Pop the matched children and push the reduced node.
                for (int i = 0; i < item.production.rhs.size(); i++) {
                    stack.pop();
                }
                stack.push(node);

                // Keep reducing on the new stack.
                // NOTE(review): the recursive result is discarded, and the outer
                // loop keeps iterating currentState.states() even though the
                // stack (and thus the real current state) has changed — confirm
                // this is intended.
                reduce(stack, nextToken, startState);
            }

        }
        return reduced;
    }
}
