﻿
#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <stack>
#include <set>
#include <map>
#include <string>
#include <iomanip>
#include <algorithm>
#include<queue>
#include <climits>
using namespace std;
// A single NFA state: outgoing character transitions, epsilon transitions,
// and acceptance information consumed later by the subset construction.
// All scalar members now carry default initializers so a stack-constructed
// NFAState is never left with indeterminate values (previously `id` and
// `is_accept` were uninitialized and only safe via make_unique's value-init).
struct NFAState {
    int id = -1;                      // unique state id (index into NFA::states)
    map<char, set<int>> transitions;  // input char -> set of destination state ids
    set<int> epsilon;                 // epsilon (ε) transitions: destination state ids
    bool is_accept = false;           // true if this state accepts
    string token_type;                // token type reported when this state accepts
};
// A DFA state produced by subset construction: it stands for a set of NFA
// states and carries deterministic (char -> single state) transitions.
struct DFAState {
    int id = -1;                 // DFA state id (doubles as index into DFA::states)
    set<int> nfa_states;         // the NFA state set this DFA state represents
    bool is_accept = false;      // bool literal instead of `0` (idiomatic)
    string token_type;           // token type of the highest-priority accepting NFA state
    map<char, int> transitions;  // input char -> id of the destination DFA state
};
// 表示一个NFA片段（起始和结束状态）
struct NFAPart {
    int start = -1;  //int的ID号
    int end = -1;
};

// NFA built via Thompson's construction from a set of token regexes.
// Supported regex syntax: alternation '|', implicit concatenation, '*', '+',
// and character classes such as [a-z0-9_] (with '-' ranges). There is no
// escape handling; parenthesised groups exist but are commented out in
// parse_atom.
class NFA {
public:
    vector<unique_ptr<NFAState>> states;
    int state_counter = 0;   // number of states created so far (== next id to assign)
    NFAPart global_part;     // fragment OR-ing together every token regex added so far
    int accept_token_state_id[5] = { -1,-1,-1,-1,-1 }; // NOTE(review): not referenced anywhere in this file

    // --- Recursive-descent parsing helpers ---

    // expression := term ('|' term)*
    NFAPart parse_expression(const string& regex, size_t& pos) {
        NFAPart part = parse_term(regex, pos);
        while (pos < regex.size() && regex[pos] == '|') {
            pos++;
            NFAPart rhs = parse_term(regex, pos);
            part = alternate(part, rhs);
        }
        return part;
    }

    // term := factor factor* — concatenation runs until '|' or ')' (the ')'
    // check remains for the disabled grouping syntax below).
    NFAPart parse_term(const string& regex, size_t& pos) {
        NFAPart part = parse_factor(regex, pos);
        while (pos < regex.size() && regex[pos] != '|' && regex[pos] != ')') {
            NFAPart next_part = parse_factor(regex, pos);
            part = concatenate(part, next_part);
        }
        return part;
    }

    // factor := atom ('*' | '+')?
    NFAPart parse_factor(const string& regex, size_t& pos) {
        NFAPart part = parse_atom(regex, pos);
        if (pos < regex.size()) {
            if (regex[pos] == '*') {
                pos++;
                part = kleeneStar(part);
            }
            else if (regex[pos] == '+') { // one-or-more repetition
                pos++;
                part = kleenePlus(part);
            }
        }
        return part;
    }

    // atom := '[' char-class ']' | single literal character.
    // Note: no escape sequences, so regex metacharacters cannot be matched
    // literally outside a character class.
    NFAPart parse_atom(const string& regex, size_t& pos) {
        if (pos >= regex.size()) {
            throw invalid_argument("Unexpected end of regex");
        }
        if (regex[pos] == '[') {
            pos++;
            size_t end_pos = regex.find(']', pos);
            if (end_pos == string::npos) {
                throw invalid_argument("Unclosed character class");
            }
            string content = regex.substr(pos, end_pos - pos);
            pos = end_pos + 1;
            return parse_char_class(content);
        }
        /*else if (regex[pos] == '(') { // parenthesised grouping (currently disabled)
            pos++;
            NFAPart part = parse_expression(regex, pos);
            if (pos >= regex.size() || regex[pos] != ')') {
                throw invalid_argument("Unclosed parenthesis");
            }
            pos++;
            return part;
        }*/
        else {
            char c = regex[pos];
            pos++;
            return createChar(c);
        }
    }

    // Parse the inside of a character class, e.g. "a-z0-9_". Every member
    // character becomes a one-char fragment and all of them are OR-ed together.
    NFAPart parse_char_class(const string& content) {
        NFAPart part;
        bool is_range = false;
        char start_char = 0;

        for (size_t i = 0; i < content.size(); ++i) {
            char c = content[i];

            if (c == '-') {
                if (i == 0 || i == content.size() - 1) { // leading/trailing '-' is a literal
                    NFAPart p = createChar(c);
                    part = part.start == -1 ? p : alternate(part, p);
                    continue;
                }
                is_range = true;
                continue;
            }

            if (is_range) {
                // Expand start_char..c inclusive. start_char was already added
                // as a literal on the previous iteration, so it appears twice
                // in the alternation — redundant but harmless.
                for (char ch = start_char; ch <= c; ++ch) {
                    NFAPart p = createChar(ch);
                    part = part.start == -1 ? p : alternate(part, p);
                }
                is_range = false;
            }
            else {
                start_char = c;
                NFAPart p = createChar(c);
                part = part.start == -1 ? p : alternate(part, p);
            }
        }

        if (is_range) { // pending range marker at end of class: treat '-' literally
            NFAPart p = createChar('-');
            part = part.start == -1 ? p : alternate(part, p);
        }

        if (part.start == -1) {
            throw invalid_argument("Empty character class");
        }
        return part;
    }


    // Allocate a new state and return its id (ids are assigned sequentially,
    // matching the state's index in `states`).
    int createState() {
        auto state = make_unique<NFAState>();
        state->id = state_counter++;
        states.push_back(move(state));
        return state_counter - 1;
    }

    // Resolve a state id to its state object; throws out_of_range on a bad id.
    NFAState* getState(int id) {
        if (id < 0 || id >= states.size()) {
            throw out_of_range("Invalid NFA state id: " + to_string(id));
        }
        return states[id].get();
    }

    // Basic fragment matching one character: start --c--> end.
    NFAPart createChar(char c) {
        int s = createState();
        int e = createState();
        getState(s)->transitions[c].insert(e);
        return { s, e };
    }

    // Concatenation ab: a's end ε-moves into b's start.
    NFAPart concatenate(NFAPart a, NFAPart b) {
        getState(a.end)->epsilon.insert(b.start);
        return { a.start, b.end };
    }

    // Alternation a|b: a new start ε-branches into both fragments, and both
    // fragment ends ε-join a new end state.
    NFAPart alternate(NFAPart a, NFAPart b) {
        int s = createState();
        int e = createState();

        getState(s)->epsilon.insert(a.start);
        getState(s)->epsilon.insert(b.start);

        getState(a.end)->epsilon.insert(e);
        getState(b.end)->epsilon.insert(e);

        return { s, e };
    }

    // Kleene star a*: zero or more repetitions (skip edge s->e plus loop edge
    // from part.end back to part.start).
    NFAPart kleeneStar(NFAPart part) {
        int s = createState();
        int e = createState();

        getState(s)->epsilon.insert(part.start);
        getState(s)->epsilon.insert(e);

        getState(part.end)->epsilon.insert(part.start);
        getState(part.end)->epsilon.insert(e);

        return { s, e };
    }
    // Kleene plus a+: like star but WITHOUT the skip edge from the new start
    // to the new end, so at least one iteration is required.
    NFAPart kleenePlus(NFAPart part) {
        // Fresh start and end states for the repeated fragment.
        int s = createState();
        int e = createState();

        // New start ε-moves into the original fragment's start.
        getState(s)->epsilon.insert(part.start);

        // The fragment's end may exit to the new end state or loop back to
        // the fragment's start (this implements repetition).
        getState(part.end)->epsilon.insert(e);
        getState(part.end)->epsilon.insert(part.start);

        return { s, e }; // return the new start/end pair
    }
    // Entry point: parse a whole regex string into a single fragment.
    NFAPart parse_regex(const string& regex) {
        size_t pos = 0;
        return parse_expression(regex, pos);
    }

    // Add one token rule: build its fragment, mark the fragment's end state
    // as accepting with the given token type, and OR it into the global NFA.
    // Earlier calls produce smaller accept-state ids, which the DFA later
    // uses as rule priority (first rule wins on conflicts).
    void build_nfa_from_regex(const string& regex, const string& token_type) {
        NFAPart part = parse_regex(regex);

        getState(part.end)->is_accept = true;
        getState(part.end)->token_type = token_type;

        if (global_part.start == -1) {
            global_part = part;
        }
        else {
            global_part = alternate(global_part, part);
        }
    }
    // Finalise the machine: create the global start state. It is always the
    // most recently created state (id == state_counter - 1), an invariant
    // that DFA::build_from_nfa relies on to locate the start state.
    void build_global_nfa() {
        int start_state = createState();
        getState(start_state)->epsilon.insert(global_part.start);
    }
    // Dump every state with its ε- and character transitions (debugging aid).
    void print() {
        using namespace std;
        for (auto& state_ptr : states) {
            NFAState* state = state_ptr.get();
            cout << "State " << state->id;
            if (state->is_accept) {
                cout << " [Accept: " << quoted(state->token_type) << "]";
            }
            cout << "\n";

            // Epsilon transitions.
            if (!state->epsilon.empty()) {
                cout << "  ε → ";
                bool first = true;
                for (int dest : state->epsilon) {
                    if (!first) cout << ", ";
                    cout << dest;
                    first = false;
                }
                cout << "\n";
            }

            // Character transitions.
            for (auto& trans : state->transitions) {
                char c = trans.first;
                auto& dests = trans.second;
                cout << "  " << quoted(string(1, c)) << " → ";
                bool first_dest = true;
                for (int dest : dests) {
                    if (!first_dest) cout << ", ";
                    cout << dest;
                    first_dest = false;
                }
                cout << "\n";
            }

            cout << flush;
        }
    }
};
// Deterministic automaton obtained from the NFA by subset construction.
class DFA {
public:
    vector<DFAState> states;       // DFA states; ids are assigned 0,1,2,... so id == vector index
    int start_id;                  // id of the start state (always 0 after build_from_nfa)
    map<set<int>, int> state_map;  // NFA state set -> DFA state id (dedup during construction)

    // Subset construction. Precondition: nfa.build_global_nfa() was the last
    // state-creating call on `nfa`, so the NFA's global start state is the
    // most recently created one (id == state_counter - 1).
    void build_from_nfa(NFA& nfa) {
        // The NFA's start state is the last state created (by build_global_nfa).
        int nfa_start_id = nfa.state_counter - 1;

        // The ε-closure of the start state becomes the initial DFA state.
        set<int> initial_states = { nfa_start_id };
        set<int> initial_closure = epsilon_closure(nfa, initial_states);

        // Create the initial DFA state (id 0).
        DFAState start_state;
        start_state.id = 0;
        start_state.nfa_states = initial_closure;
        determine_accept_status(start_state, nfa);

        states.push_back(start_state);
        state_map[initial_closure] = 0;

        queue<int> process_queue;
        process_queue.push(0);

        // Alphabet = every character appearing on any NFA transition.
        set<char> alphabet = collect_alphabet(nfa);

        int next_id = 1;

        // Standard worklist loop over not-yet-processed DFA states.
        while (!process_queue.empty()) {
            int current_id = process_queue.front();
            process_queue.pop();

            // Try every possible input character.
            for (char c : alphabet) {
                set<int> moved = move(nfa, states[current_id].nfa_states, c);
                if (moved.empty()) continue;

                set<int> new_closure = epsilon_closure(nfa, moved);

                if (new_closure.empty()) continue;

                // Reuse the DFA state for this NFA set if it exists, else create it.
                auto it = state_map.find(new_closure);
                if (it == state_map.end()) {
                    DFAState new_state;
                    new_state.id = next_id;
                    new_state.nfa_states = new_closure;
                    determine_accept_status(new_state, nfa);

                    states.push_back(new_state);
                    state_map[new_closure] = next_id;
                    process_queue.push(next_id);
                    next_id++;
                }

                // Record the transition.
                states[current_id].transitions[c] = state_map[new_closure]; // ids double as vector indices
            }
        }

        start_id = 0;
    }

private:
    // Collect every character used on any NFA transition (the input alphabet).
    set<char> collect_alphabet(const NFA& nfa) {
        set<char> chars;
        for (const auto& state_ptr : nfa.states) {
            for (const auto& trans_pair : state_ptr->transitions) {
                chars.insert(trans_pair.first);
            }
        }
        return chars;
    }

    // ε-closure of a state set, computed iteratively with an explicit stack
    // (depth-first; traversal order is irrelevant for a closure).
    set<int> epsilon_closure( NFA& nfa, const set<int>& states) {
        set<int> closure;
        stack<int> stack;

        for (int s : states) {
            closure.insert(s);
            stack.push(s);
        }

        while (!stack.empty()) {
            int current = stack.top();
            stack.pop();

            const NFAState* state = nfa.getState(current);
            for (int e : state->epsilon) {
                if (!closure.count(e)) {
                    closure.insert(e);
                    stack.push(e);
                }
            }
        }

        return closure;
    }

    // Set of NFA states reachable from `states` on input character c
    // (character transitions only — no ε edges).
    set<int> move( NFA& nfa, const set<int>& states, char c) {
        set<int> result;
        for (int s : states) {
            const NFAState* state = nfa.getState(s);
            auto it = state->transitions.find(c);
            if (it != state->transitions.end()) {
                for (int dest : it->second) {
                    result.insert(dest);
                }
            }
        }
        return result;
    }

    // Mark the DFA state accepting if any member NFA state accepts. Conflicts
    // are resolved by the smallest NFA state id; since earlier token rules
    // create their accept states first, the first-defined rule wins.
    void determine_accept_status(DFAState& dfa_state, NFA& nfa) {
        int min_accept_id = INT_MAX;
        dfa_state.is_accept = false;
        dfa_state.token_type = "";

        // Scan all member NFA states for the highest-priority accepting one.
        for (int s : dfa_state.nfa_states) {
            const NFAState* state = nfa.getState(s);
            if (state->is_accept) {
                dfa_state.is_accept = true;
                if (state->id < min_accept_id) { // smallest id = highest priority
                    min_accept_id = state->id;
                    dfa_state.token_type = state->token_type;
                }
            }
        }
    }
};
class Lexer {
private:
    vector<NFAState> nfa_states;
    DFAState* dfa_start;
    vector<DFAState> dfa_states;
    int dfa_start_id = 0;
    map<set<int>, int> dfa_state_map;  // 状态集合到DFA ID的映射
    map<int, NFAState> nfa_state_map;
    NFA nfa;
    DFA dfa;
    // 构建关键字、标识符等的NFA
    string trim(const string& s) {  // 辅助函数：去除字符串首尾空格
        size_t start = s.find_first_not_of(" \t");
        if (start == string::npos) return "";
        size_t end = s.find_last_not_of(" \t");
        return s.substr(start, end - start + 1);
    }
    void build_nfa(const string& regex, const string& token_type) {
        nfa.build_nfa_from_regex(regex, token_type);
    }


public:
    Lexer(const string& grammar_file) {
        ifstream file(grammar_file);
        if (!file.is_open()) {
            cerr << "错误，无法打开文件" << grammar_file << endl;
            exit(1);
        }
        string line;
        while (getline(file, line)) {
            line = trim(line);
            // 跳过空行和注释（以#开头为注释）
            if (line.empty() || line[0] == '#') continue;

            // 查找箭头符号 → 或 ->
            size_t arrow_pos = line.find("→");
            int arrow_len = 1;//记录箭头符号的长度
            if (arrow_pos == string::npos) {//如果没有找到
                arrow_pos = line.find("->");
                arrow_len = 2;
            }

            if (arrow_pos == string::npos) {
                cerr << "警告：无效语句：" << line << endl;//输出错误信息
                continue;
            }

            // 分割左右部分
            string regex = trim(line.substr(0, arrow_pos));
            string token_type = trim(line.substr(arrow_pos + arrow_len));

            if (regex.empty() || token_type.empty()) {
                cerr << "警告：检测到语句缺失： " << line << endl;
                continue;
            }
            // 调用NFA构建函数
            try {
                build_nfa(regex, token_type);
            }
            catch (const exception& e) {
                cerr << "Error parsing regex '" << regex << "': " << e.what() << endl;
                exit(1);
            }
        }
        nfa.build_global_nfa();
        dfa.build_from_nfa(nfa);
        
    }

    void NFAprint() {
        nfa.print();
    }
    void DFAprint() {
        for (const DFAState& state : dfa.states) {
            cout << "DFA State " << state.id << ": ";
            cout << (state.is_accept ? "Accept " + state.token_type : "Non-Accept") << endl;
            cout << "NFA States: ";
            for (int s : state.nfa_states) cout << s << " ";
            cout << "\nTransitions: ";
            for (const auto& trans : state.transitions) {
                cout << trans.first << "->" << trans.second << " ";
            }
            cout << "\n\n";
        }
    }
    void resultprint(const string& text_file) {
        ifstream file(text_file);
        string line;
        int line_num = 1;
        while (getline(file, line)) {
            line = trim(line);
            // 跳过空行和注释（以#开头为注释）
            if (line.empty() || line[0] == '#') continue;
            else {
                vector<tuple<int, string, string>> a=tokenize(line);
                for (const auto& i : a) {
                    cout << "(" << line_num << "," << get<1>(i) << "," << get<2>(i) << ")"; //调用
                }
                cout << endl;
            }
            line_num++;
        }
    }
    vector<tuple<int, string, string>> tokenize(const string& source) {
        vector<tuple<int, string, string>> tokens;
        int current_line = 1;
        int pos = 0;
        int len = source.length();

        while (pos < len) {
            // 跳过空白字符
            while (pos < len) {
                char c = source[pos];
                if (c == ' ' || c == '\t' || c == '\r') {
                    pos++;
                }
                else if (c == '\n') {
                    current_line++;
                    pos++;
                }
                else {
                    break;
                }
            }
            if (pos >= len) break;

            int start_pos = pos;
            int start_line = current_line;
            int current_state = dfa_start_id;
            int last_accept_pos = -1;
            string last_token_type;

            int temp_pos = pos;
            while (temp_pos < len) {
                char c = source[temp_pos];
                // 更新行号（处理换行符）
                if (c == '\n') {
                    current_line++;
                }

                // 获取当前DFA状态
                const DFAState* current_dfa_state = nullptr;
                for (const auto& state : dfa.states) {
                    if (state.id == current_state) {
                        current_dfa_state = &state;
                        break;
                    }
                }
                if (!current_dfa_state) {
                    break; // 无效状态，无法继续
                }

                // 检查当前字符是否有转移
                auto it = current_dfa_state->transitions.find(c);
                if (it == current_dfa_state->transitions.end()) {
                    break; // 无转移，结束当前匹配
                }
                current_state = it->second;
                temp_pos++;

                // 检查新状态是否是接受状态
                const DFAState* new_dfa_state = nullptr;
                for (const auto& state : dfa.states) {
                    if (state.id == current_state) {
                        new_dfa_state = &state;
                        break;
                    }
                }
                if (new_dfa_state && new_dfa_state->is_accept) {
                    last_accept_pos = temp_pos; // 记录为结束位置的下一个字符索引
                    last_token_type = new_dfa_state->token_type;
                }
            }

            if (last_accept_pos != -1) {
                // 生成Token
                string content = source.substr(start_pos, last_accept_pos - start_pos);
                tokens.emplace_back(start_line, last_token_type, content);
                pos = last_accept_pos;
            }
            else {
                // 报错：无法识别的字符
                string error_char = source.substr(start_pos, 1);
                cerr << "Error at line " << start_line << ": Invalid character '" << error_char << "'" << endl;
                pos = start_pos + 1; // 跳过该字符继续
            }
        }

        return tokens;
    }
};
int main() {
    // Build the DFA-based lexer from the grammar description, then print the
    // token stream of the input text as (line, token_type, lexeme) tuples.
    Lexer lexer{ "grammar.txt" };
    lexer.resultprint("text.txt");
    // Debug dumps of the automata, if needed:
    // lexer.NFAprint();
    // lexer.DFAprint();
    return 0;
}








