use std::borrow::Cow;
// lexer.rs
use std::collections::HashSet;
use std::hash::Hash;
use std::{iter, usize};

// Lexical token categories. Keyword/Operator/Separator carry both a printable
// alias (e.g. "IF", "PLUS") and the concrete source text they match.
#[derive(Debug, Clone, PartialEq)]
pub enum TokenType {
    Identifier,
    Integer,
    Float,
    Keyword(&'static str/*alias*/, &'static str),
    Operator(&'static str/*alias*/, &'static str),
    Separator(&'static str/*alias*/, char),
    CommentSlashSlash,  // line comment: "// ..."
    CommentSlashStar,   // block comment: "/* ... */"
    Unknown,            // lexical error (malformed literal, stray character)
}

// One lexical unit together with where it was found in the source text.
#[derive(Debug, Clone)]
pub struct Token {
    pub token_type: TokenType,
    pub lexeme: String,   // the exact matched source text
    pub position: usize,  // byte offset into the input (see Lexer::tokenize)
    pub line: usize,      // 1-based line number
}

impl Token {
    // Uppercase category name used when printing the token listing.
    fn get_alias(&self) -> &str {
        match self.token_type {
            TokenType::Keyword(name, _) => name,
            TokenType::Operator(name, _) => name,
            TokenType::Separator(name, _) => name,
            TokenType::Identifier => "IDENT",
            TokenType::Integer => "INTEGER_CONST",
            TokenType::Float => "FLOAT_CONST",
            TokenType::CommentSlashSlash => "LINE_COMMENT",
            TokenType::CommentSlashStar => "MULTILINE_COMMENT",
            TokenType::Unknown => "Unknown",
        }
    }

    // Printable value of the token. Hex ("0x…"/"0X…") and octal ("0…")
    // integer literals are converted to decimal (unparsable digit runs fall
    // back to 0); keyword/operator/separator tokens echo their canonical
    // text; everything else borrows the raw lexeme unchanged.
    fn get_value<'a>(&'a self) -> Cow<'a, str> {
        match self.token_type {
            TokenType::Integer => {
                let text = self.lexeme.as_str();
                // Split off an optional leading minus sign.
                let (negative, body) = match text.strip_prefix('-') {
                    Some(unsigned) => (true, unsigned),
                    None => (false, text),
                };
                // "0x"/"0X" prefix → hex; a '0' followed by anything else →
                // octal; otherwise the literal is already decimal.
                let converted = if let Some(hex_digits) =
                    body.strip_prefix("0x").or_else(|| body.strip_prefix("0X"))
                {
                    Some(i32::from_str_radix(hex_digits, 16).unwrap_or(0))
                } else if body.starts_with('0') && body.len() > 1 {
                    Some(i32::from_str_radix(&body[1..], 8).unwrap_or(0))
                } else {
                    None
                };
                match converted {
                    None => Cow::from(&self.lexeme),
                    Some(value) if negative => Cow::from(format!("-{}", value)),
                    Some(value) => Cow::from(value.to_string()),
                }
            },
            TokenType::Keyword(_, key) => Cow::from(key),
            TokenType::Operator(_, op) => Cow::from(op),
            TokenType::Separator(_, sep) => Cow::from(sep.to_string()),
            _ => Cow::from(&self.lexeme),
        }
    }
}

// NFA state: represented by a plain index.
type State = usize;

// One NFA edge from `from` to `to`, taken when `input_func` accepts the
// current character. `input_func == None` marks an ε (empty) transition.
struct Transition {
    from: State,
    to: State,
    input_func: Option<Box<dyn Fn(char) -> bool>>
}

// A single-token NFA: one automaton per lexical rule.
struct NFA {
    states: Vec<State>,                // all known states (start is always 0)
    start: State,
    accepts: HashSet<Ending>,          // accepting / error states
    transitions: Vec<Transition>,
    token_type: TokenType,             // token produced on a successful match
    // If matching dies after this state was reached, match_from reports the
    // rest of the input as Unknown (used for unterminated block comments).
    must_be_accept_since_start: Option<State>
}

// An accepting state paired with how reaching it should be treated.
// Hash/PartialEq below deliberately ignore the AcceptType: identity is the
// state index alone.
#[derive(Debug, Clone)]
struct Ending(State, AcceptType);

// Hash only the state index, mirroring PartialEq below so the Hash/Eq
// contract (a == b implies hash(a) == hash(b)) holds.
impl Hash for Ending {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.hash(state);
    }
}

// Equality considers only the state index; the attached AcceptType is not
// part of an Ending's identity.
impl PartialEq for Ending {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

// PartialEq above is a total equivalence on state indices, so Eq is sound.
impl Eq for Ending {
}

// How reaching an accepting state affects the match in progress.
#[derive(Debug, Clone)]
enum AcceptType {
    Accept(bool/*ending*/),  // true: stop immediately, no longer match is possible
    Error                    // accept the lexeme but classify it as TokenType::Unknown
}

impl Ending {
    fn accept(state: State) -> Self {
        Ending(state, AcceptType::Accept(false))
    }

    fn one_ending(state: State) -> Self {
        Ending(state, AcceptType::Accept(true))
    }

    fn error(state: State) -> Self {
        Ending(state, AcceptType::Error)
    }
}

impl NFA {
    fn new(token_type: TokenType) -> Self {
        NFA {
            states: vec![0], // 初始状态 0
            start: 0,
            accepts: HashSet::new(),
            transitions: vec![],
            token_type,
            must_be_accept_since_start: None
        }
    }

    // 构建匹配特定字符串的 NFA
    fn build_keyword_nfa(keyword_alias: &'static str, s: &'static str) -> Self {
        let mut nfa = NFA::new(TokenType::Keyword(keyword_alias, s));
        let mut state = 0;
        for c in s.chars() {
            let next = state + 1;
            nfa.add_transition(state, next, c);
            state = next;
        }
        nfa.add_accept(Ending::one_ending(state));

        nfa
    }

    // 构建匹配特定字符串的 NFA
    fn build_operator_nfa(keyword_alias: &'static str, s: &'static str) -> Self {
        let mut nfa = NFA::new(TokenType::Operator(keyword_alias, s));
        let mut state = 0;
        for c in s.chars() {
            let next = state + 1;
            nfa.add_transition(state, next, c);
            state = next;
        }
        nfa.add_accept(Ending::one_ending(state));

        nfa
    }

    // 构建标识符 NFA: [a-zA-Z][a-zA-Z0-9]*
    fn build_identifier_nfa() -> Self {
        let mut nfa = NFA::new(TokenType::Identifier);
        let start = 0;
        let after_first = 1;

        // 第一个字符：字母或下划线
        for c in ('a'..='z').chain('A'..='Z').chain(iter::once('_')) {
            nfa.add_transition(start, after_first, c);
        }

        // 后续字符：字母或数字
        for c in ('a'..='z').chain('A'..='Z').chain('0'..='9') {
            nfa.add_transition(after_first, after_first, c);
        }

        nfa.add_accept(Ending::accept(after_first));
        nfa
    }

    fn build_float_nfa() -> Self {
        let mut nfa = NFA::new(TokenType::Float);
        let start = 0;
        let negative_state = 1;
        let before_dot_state = 2;
        let in_dot_state = 3;
        let after_dot_state = 4;
        let error_state = 5;

        nfa.add_transition(start, negative_state, '-');
        nfa.add_transition(before_dot_state, in_dot_state, '.');
        nfa.add_transition(after_dot_state, error_state, '.');

        for c in '0'..='9' {
            nfa.add_transition(start, before_dot_state, c);
            nfa.add_transition(negative_state, before_dot_state, c);
            nfa.add_transition(in_dot_state, after_dot_state, c);
            nfa.add_transition(before_dot_state, before_dot_state, c);
            nfa.add_transition(after_dot_state, after_dot_state, c);
        }
        nfa.add_accept(Ending::accept(after_dot_state));
        nfa.add_accept(Ending::error(error_state));
        nfa
    }

    // 构建整数
    fn build_integer_nfa() -> Self {
        let mut nfa = NFA::new(TokenType::Integer);
        let start = 0;
        let digit_state = 2;
        let hex_state_pending = 3;
        let eight_state_pending = 5;
        let eight_state = 6;
        let error_state = 7;

        let negative_state = 8;

        nfa.add_transition(start, negative_state, '-');

        for c in '0'..='9' {
            if c == '0' {
                nfa.add_transition(start, hex_state_pending, c);
                nfa.add_transition(start, eight_state_pending, c);

                nfa.add_transition(negative_state, hex_state_pending, c);
                nfa.add_transition(negative_state, eight_state_pending, c);
            }
            nfa.add_transition(start, digit_state, c);
            nfa.add_transition(negative_state, digit_state, c);
            nfa.add_transition(digit_state, digit_state, c);
        }
        let hex_digit_state = 4;
        nfa.add_transition(hex_state_pending, hex_digit_state, 'x');
        nfa.add_transition(hex_state_pending, hex_digit_state, 'X');
        for c in ('a'..='f').chain('A'..='F').chain('0'..='9') {
            if c == '0' {
                nfa.add_transition(eight_state_pending, error_state, c);
                nfa.add_transition(eight_state, eight_state, c);
            } else if ('1'..='9').contains(&c) {
                if c == '8' || c == '9' {
                    nfa.add_transition(eight_state, error_state, c);
                } else {
                    nfa.add_transition(eight_state_pending, eight_state, c);
                    nfa.add_transition(eight_state, eight_state, c);
                }
            } else {
                nfa.add_transition(eight_state_pending, error_state, c);
                nfa.add_transition(eight_state, error_state, c);
            }

            nfa.add_transition(hex_digit_state, hex_digit_state, c);
        }

        nfa.add_accept(Ending::accept(digit_state));
        nfa.add_accept(Ending::accept(hex_digit_state));
        nfa.add_accept(Ending::accept(eight_state));
        nfa.add_accept(Ending::error(error_state));

        // nfa.add_transition_func(digit_state, error_state, |c| c.is_whitespace());
        // nfa.add_transition_func(hex_digit_state, error_state, |c| c.is_whitespace());
        // nfa.add_transition_func(eight_state, error_state, |c| c.is_whitespace());
        //
        // nfa.add_un_accept(hex_ready_state);
        // nfa.add_un_accept(error_state);
        nfa
    }

    fn build_comment_slash_nfa() -> Self {
        let mut nfa = NFA::new(TokenType::CommentSlashSlash);

        let start = 0;
        let after_slash = 1;
        nfa.add_transition(start, after_slash, '/');

        let after_slash_slash = 3;
        // let waiting_for_next_line = 4;
        let slash_slash_end_state = 5;
        nfa.add_transition(after_slash, after_slash_slash, '/');
        nfa.add_transition_func(after_slash_slash, after_slash_slash, |c| {
            return c != '\n'/* && c != '\r'*/;
        });
        nfa.add_transition(after_slash_slash, slash_slash_end_state, '\n');
        nfa.add_accept(Ending::one_ending(slash_slash_end_state));

        nfa
    }

    fn build_comment_slash_star_nfa() -> Self {
        let mut nfa = NFA::new(TokenType::CommentSlashStar);

        let start = 0;
        let after_slash = 1;
        nfa.add_transition(start, after_slash, '/');

        let after_star = 6;
        nfa.add_transition(after_slash, after_star, '*');
        let after_star_star = 7;
        nfa.add_transition(after_star, after_star_star, '*');
        nfa.add_epsilon(after_star_star, after_star);
        nfa.must_be_accept_since_start = Some(after_star);
        nfa.add_transition_func(after_star, after_star, |c| {
            return c != '*';
        });
        let star_slash_end_state = 8;
        nfa.add_transition(after_star_star, star_slash_end_state, '/');

        nfa.add_accept(Ending::one_ending(star_slash_end_state));
        nfa
    }

    // 添加带字符的转移
    fn add_transition(&mut self, from: State, to: State, input: char) {
        self.add_transition_func(from, to, move |c| c == input)
    }

    fn add_transition_func(&mut self, from: State, to: State, input_func: impl Fn(char) -> bool + 'static) {
        self.transitions.push(Transition {
            from,
            to,
            input_func: Some(Box::new(input_func)),
        });
        if !self.states.contains(&to) {
            self.states.push(to);
        }
    }

    // 添加 ε 转移
    fn add_epsilon(&mut self, from: State, to: State) {
        self.transitions.push(Transition {
            from,
            to,
            input_func: None,
        });
        if !self.states.contains(&to) {
            self.states.push(to);
        }
    }

    // 设置接受状态
    fn add_accept(&mut self, state: Ending) {
        self.accepts.insert(state);
    }

    // 执行 NFA 匹配：从某个起始位置开始，尽可能匹配最长有效字符串
    fn match_from(&self, input: &str, start_pos: usize) -> Option<(TokenType, String, usize)> {
        let chars: Vec<char> = input[start_pos..].chars().collect();
        if chars.is_empty() {
            return None;
        }

        let mut current_states = HashSet::new();
        current_states.insert(self.start);

        // ε-闭包初始化
        self.epsilon_closure(&mut current_states);

        let mut matched = None;
        let mut matched_len = 0;
        let mut passed_len = 0;

        'outer: for (i, &c) in chars.iter().enumerate() {
            let mut next_states = HashSet::new();

            // 执行输入转移
            for &state in current_states.iter() {
                for trans in &self.transitions {
                    if trans.from == state && trans.input_func.as_ref().map_or(false, |f| f(c)) {
                        next_states.insert(trans.to);
                    }
                }
            }

            // 计算 ε-闭包
            self.epsilon_closure(&mut next_states);

            if next_states.is_empty() {
                break;
            }

            // 更新当前状态
            current_states = next_states;
            if c == '\n' {
                passed_len += 1;
            }

            // 检查是否有接受状态
            let ending_state = current_states.iter().filter_map(|state| {
                self.accepts.iter().find(|ending| ending.0 == *state).map(|ending| ending.clone())
            }).collect::<Vec<Ending>>();
            for ending in ending_state {
                matched_len = i + 1;
                match ending.1 {
                    AcceptType::Accept(ending) => {
                        matched = Some(self.token_type.clone());
                        if ending {
                            break 'outer;
                        }
                    }
                    AcceptType::Error => {
                        matched = Some(TokenType::Unknown);
                        break 'outer;
                    }
                }
            }
        }

        matched.map(|t| (t, input[start_pos..start_pos + matched_len].to_string(), passed_len))
            .or_else(|| {
                if let Some(state) = self.must_be_accept_since_start {
                    if current_states.contains(&state) {
                        return Some((TokenType::Unknown, input[start_pos..].to_string(), 0));
                    }
                }
                None
            })
    }

    // 计算 ε-闭包
    fn epsilon_closure(&self, states: &mut HashSet<State>) {
        let mut added = true;
        while added {
            added = false;
            let mut new_states = states.clone();
            for &state in states.iter() {
                for trans in &self.transitions {
                    if trans.from == state && trans.input_func.is_none() {
                        if new_states.insert(trans.to) {
                            added = true;
                        }
                    }
                }
            }
            *states = new_states;
        }
    }
}

// The lexer is the union of many small NFAs, one per token rule; tokenize()
// runs all of them at each position and keeps the longest match.
struct Lexer {
    nfas: Vec<NFA>,
}

impl Lexer {
    // Build the full rule set. Registration order matters only for
    // equal-length matches: tokenize() keeps the FIRST NFA with the longest
    // lexeme, so keywords (registered before the identifier NFA) win ties
    // against identifiers.
    pub fn new() -> Self {
        let mut lexer = Lexer { nfas: vec![] };

        // 1. Keywords.
        for &keyword in &[("IF", "if"), ("CONST", "const"),
            ("ELSE", "else"), ("WHILE", "while"), ("RETURN", "return"), ("BREAK", "break"),
            ("CONTINUE", "continue"), ("INT", "int"), ("FLOAT", "float"), ("VOID", "void")] {
            lexer.nfas.push(NFA::build_keyword_nfa(keyword.0, keyword.1));
        }

        // 2. Identifiers.
        lexer.nfas.push(NFA::build_identifier_nfa());

        // 3. Numeric literals.
        lexer.nfas.push(NFA::build_float_nfa());
        lexer.nfas.push(NFA::build_integer_nfa());

        // 4. Operators.
        for &op in &[("NOT", "!"), ("PLUS", "+"), ("MINUS", "-"), ("MUL", "*"),
            ("DIV", "/"), ("MOD", "%"), ("ASSIGN", "="), ("NEQ", "!="), ("GE", ">="),
            ("GT", ">"), ("LT", "<"), ("LE", "<="), ("EQ", "=="), ("OR", "||"), ("AND", "&&")] {
            lexer.nfas.push(NFA::build_operator_nfa(op.0, op.1));
        }

        // 5. Separators (single-character literal NFAs built inline).
        for &sep in &[("L_PAREN", '('), ("R_PAREN", ')'), ("L_BRACE", '{'),
            ("R_BRACE", '}'), ("L_BRACKET", '['), ("R_BRACKET", ']'),
            ("SEMICOLON", ';'), ("COMMA", ',')] {
            let mut nfa = NFA::new(TokenType::Separator(sep.0, sep.1));
            nfa.add_transition(0, 1, sep.1);
            nfa.add_accept(Ending::one_ending(1));
            lexer.nfas.push(nfa);
        }

        // 6. Comments.
        lexer.nfas.push(NFA::build_comment_slash_nfa());
        lexer.nfas.push(NFA::build_comment_slash_star_nfa());

        lexer
    }

    // Split `input` into (accepted tokens, error tokens). Each Token records
    // the byte offset and 1-based line number of its START.
    pub fn tokenize(&self, input: &str) -> (Vec<Token>/*success*/, Vec<Token>/*fail*/) {
        let mut tokens = vec![];
        let mut tokens_failed = vec![];
        let mut pos = 0;
        let mut line = 1;
        let input_len = input.len();

        while pos < input_len {
            // Skip whitespace, counting '\n' / "\r\n" as line breaks.
            let mut rest = input[pos..].chars().peekable();
            if let Some(ws) = rest.next() {
                if ws == '\n' {
                    line += 1;
                    pos += 1;
                    continue;
                } else if ws == '\r' && rest.peek() == Some(&'\n') {
                    line += 1;
                    pos += 2;
                    continue;
                } else if ws.is_whitespace() {
                    // Advance by the char's real byte width: non-ASCII
                    // whitespace is multi-byte, and the previous `pos += 1`
                    // left `pos` inside the char, making the next slice
                    // panic on a char boundary.
                    pos += ws.len_utf8();
                    continue;
                }
            }

            let token_start = pos;
            let start_line = line;

            // Try every NFA and keep the **longest** match; an Unknown
            // verdict from any NFA (e.g. "1.5.") short-circuits immediately.
            let mut best_match: Option<(TokenType, String, usize)> = None;

            for nfa in &self.nfas {
                if let Some((ty, lexeme, newlines)) = nfa.match_from(input, pos) {
                    if matches!(ty, TokenType::Unknown) {
                        best_match = Some((ty, lexeme, newlines));
                        break;
                    } else if best_match.as_ref().map_or(true, |best| lexeme.len() > best.1.len()) {
                        best_match = Some((ty, lexeme, newlines));
                    }
                }
            }

            if let Some((ty, lexeme, newlines)) = best_match {
                pos += lexeme.len();
                line += newlines;
                let failed = matches!(ty, TokenType::Unknown);
                let token = Token {
                    token_type: ty,
                    lexeme,
                    // Record the token's START. (The old code stored the
                    // already-advanced position/line, i.e. the token's END;
                    // printed output is unchanged because every printed
                    // token fits on one line.)
                    position: token_start,
                    line: start_line,
                };
                if failed {
                    tokens_failed.push(token);
                } else {
                    tokens.push(token);
                }
            } else {
                // No NFA matched at all: emit the single offending character.
                let c = input[pos..].chars().next()
                    .expect("pos < input_len and sits on a char boundary");
                pos += c.len_utf8();
                tokens_failed.push(Token {
                    token_type: TokenType::Unknown,
                    lexeme: c.to_string(),
                    position: token_start,
                    line,
                });
            }
        }

        (tokens, tokens_failed)
    }
}

// CLI entry point: lex one .sy file and print either the token listing or
// the lexical errors (all output goes to stderr, matching the original).
fn main() {
    let args: Vec<String> = std::env::args().collect();

    // Require exactly one argument ending in ".sy".
    let source_path = match args.get(1) {
        Some(path) if path.ends_with(".sy") => path,
        _ => {
            eprintln!("Usage: {} <file.sy>", args[0]);
            return;
        }
    };

    let code = match std::fs::read_to_string(source_path) {
        Ok(text) => text,
        Err(_) => {
            eprintln!("Error: Can't open file.");
            return;
        }
    };

    let (accepted, rejected) = Lexer::new().tokenize(&code);

    if rejected.is_empty() {
        // Success: print every token except comments.
        for token in accepted {
            if matches!(token.token_type,
                TokenType::CommentSlashSlash | TokenType::CommentSlashStar) {
                continue;
            }
            eprintln!("{} {} at Line {}.", token.get_alias(), token.get_value(), token.line);
        }
    } else {
        // Failure: report each unrecognized lexeme.
        for token in rejected {
            eprintln!("Error type A at Line {}: Mysterious character \"{}\"", token.line, token.lexeme);
        }
    }
}
