use pest::Parser;
use pest_derive::Parser;
use std::collections::HashMap;
use std::fs;

// Pest-generated lexer. The token rules (`Rule::…`) referenced throughout
// this file are produced at build time from the grammar file
// `expression.pest` in the crate's `src/` directory.
#[derive(Parser)]
#[grammar = "expression.pest"]
pub struct ExpressionParser;

// Reserved keywords of the source language.
#[derive(Debug, Clone, PartialEq)]
pub enum Keyword {
    Const,
    Int,
    Void,
    If,
    Else,
    While,
    Break,
    Continue,
    Return,
}

impl Keyword {
    // Single source of truth: (uppercase token name, source-text lexeme).
    fn strings(&self) -> (&'static str, &'static str) {
        match self {
            Keyword::Const => ("CONST", "const"),
            Keyword::Int => ("INT", "int"),
            Keyword::Void => ("VOID", "void"),
            Keyword::If => ("IF", "if"),
            Keyword::Else => ("ELSE", "else"),
            Keyword::While => ("WHILE", "while"),
            Keyword::Break => ("BREAK", "break"),
            Keyword::Continue => ("CONTINUE", "continue"),
            Keyword::Return => ("RETURN", "return"),
        }
    }

    /// Uppercase token name used in diagnostic output (e.g. "CONST").
    pub fn token_name(&self) -> &'static str {
        self.strings().0
    }

    /// Original source spelling of the keyword (e.g. "const").
    pub fn lexeme(&self) -> &'static str {
        self.strings().1
    }
}

// Operators of the source language. Each variant carries its uppercase
// token name (e.g. "PLUS"), assigned when the token is recognized.
#[derive(Debug, Clone, PartialEq)]
pub enum Operator {
    Plus(String),   // +
    Minus(String),  // -
    Mul(String),    // *
    Div(String),    // /
    Mod(String),    // %
    Assign(String), // =
    Eq(String),     // ==
    Neq(String),    // !=
    Lt(String),     // <
    Gt(String),     // >
    Le(String),     // <=
    Ge(String),     // >=
    Not(String),    // !
    And(String),    // &&
    Or(String),     // ||
}

impl Operator {
    /// Token name stored in the variant (e.g. "PLUS", "EQ").
    pub fn token_name(&self) -> &str {
        // Every variant has the same shape, so one or-pattern covers all.
        match self {
            Operator::Plus(name)
            | Operator::Minus(name)
            | Operator::Mul(name)
            | Operator::Div(name)
            | Operator::Mod(name)
            | Operator::Assign(name)
            | Operator::Eq(name)
            | Operator::Neq(name)
            | Operator::Lt(name)
            | Operator::Gt(name)
            | Operator::Le(name)
            | Operator::Ge(name)
            | Operator::Not(name)
            | Operator::And(name)
            | Operator::Or(name) => name,
        }
    }

    /// Source-text spelling of the operator (e.g. "+", "==").
    pub fn lexeme(&self) -> String {
        let symbol = match self {
            Operator::Plus(_) => "+",
            Operator::Minus(_) => "-",
            Operator::Mul(_) => "*",
            Operator::Div(_) => "/",
            Operator::Mod(_) => "%",
            Operator::Assign(_) => "=",
            Operator::Eq(_) => "==",
            Operator::Neq(_) => "!=",
            Operator::Lt(_) => "<",
            Operator::Gt(_) => ">",
            Operator::Le(_) => "<=",
            Operator::Ge(_) => ">=",
            Operator::Not(_) => "!",
            Operator::And(_) => "&&",
            Operator::Or(_) => "||",
        };
        symbol.to_string()
    }
}

// Punctuation / delimiter tokens of the source language.
#[derive(Debug, Clone, PartialEq)]
pub enum Delimiter {
    LParen,    // (
    RParen,    // )
    LBrace,    // {
    RBrace,    // }
    LBracket,  // [
    RBracket,  // ]
    Comma,     // ,
    Semicolon, // ;
}

impl Delimiter {
    // (token name, source character) for each delimiter.
    // Note: "L_BRACKT"/"R_BRACKT" spelling is intentional — it must match
    // the expected diagnostic output format exactly.
    fn strings(&self) -> (&'static str, char) {
        match self {
            Delimiter::LParen => ("L_PAREN", '('),
            Delimiter::RParen => ("R_PAREN", ')'),
            Delimiter::LBrace => ("L_BRACE", '{'),
            Delimiter::RBrace => ("R_BRACE", '}'),
            Delimiter::LBracket => ("L_BRACKT", '['),
            Delimiter::RBracket => ("R_BRACKT", ']'),
            Delimiter::Comma => ("COMMA", ','),
            Delimiter::Semicolon => ("SEMICOLON", ';'),
        }
    }

    /// Uppercase token name used in diagnostic output.
    pub fn as_str(&self) -> &'static str {
        self.strings().0
    }

    /// Source-text spelling of the delimiter as an owned String.
    pub fn lexeme(&self) -> String {
        self.strings().1.to_string()
    }
}

// The category of a recognized token, with any associated payload.
#[derive(Debug, Clone, PartialEq)]
pub enum TokenType {
    Keyword(Keyword),
    Operator(Operator),
    Delimiter(Delimiter),
    Identifier(String), // original identifier spelling
    IntConst(i64),
}

impl TokenType {
    /// Uppercase token-class name used in diagnostic output.
    pub fn token_name(&self) -> &str {
        match self {
            TokenType::Identifier(_) => "IDENT",
            TokenType::IntConst(_) => "INTEGER_CONST",
            TokenType::Keyword(kw) => kw.token_name(),
            TokenType::Operator(op) => op.token_name(),
            TokenType::Delimiter(delim) => delim.as_str(),
        }
    }

    /// The token's text as printed in diagnostic output. For integer
    /// constants this is the decimal rendering of the parsed value, not
    /// necessarily the original source spelling (e.g. hex input).
    pub fn lexeme(&self) -> String {
        match self {
            TokenType::Identifier(name) => name.clone(),
            TokenType::IntConst(value) => value.to_string(),
            TokenType::Keyword(kw) => kw.lexeme().to_string(),
            TokenType::Operator(op) => op.lexeme(),
            TokenType::Delimiter(delim) => delim.lexeme(),
        }
    }
}

// A single lexical token together with its position in the source file.
#[derive(Debug, Clone, PartialEq)]
pub struct Token {
    pub token_type: TokenType,
    pub line: usize,  // 1-based source line (from pest's line_col)
    pub index: usize, // 1-based position of this token within its line
}

// A lexical error (unrecognized character) found while scanning.
#[derive(Debug, Clone)]
pub struct LexicalError {
    pub line: usize,     // 1-based source line where the error occurred
    pub message: String, // human-readable description, printed by parse_file
}

// Helper: map a pest rule to its keyword, or None for non-keyword rules.
fn rule_to_keyword(rule: Rule) -> Option<Keyword> {
    let keyword = match rule {
        Rule::const_kw => Keyword::Const,
        Rule::int_kw => Keyword::Int,
        Rule::void_kw => Keyword::Void,
        Rule::if_kw => Keyword::If,
        Rule::else_kw => Keyword::Else,
        Rule::while_kw => Keyword::While,
        Rule::break_kw => Keyword::Break,
        Rule::continue_kw => Keyword::Continue,
        Rule::return_kw => Keyword::Return,
        _ => return None,
    };
    Some(keyword)
}

// Helper: map a pest rule to its operator, or None for non-operator rules.
// The String payload is the operator's uppercase token name.
fn rule_to_operator(rule: Rule) -> Option<Operator> {
    let operator = match rule {
        Rule::plus => Operator::Plus("PLUS".into()),
        Rule::minus => Operator::Minus("MINUS".into()),
        Rule::mul => Operator::Mul("MUL".into()),
        Rule::div => Operator::Div("DIV".into()),
        Rule::mod_op => Operator::Mod("MOD".into()),
        Rule::assign => Operator::Assign("ASSIGN".into()),
        Rule::eq => Operator::Eq("EQ".into()),
        Rule::neq => Operator::Neq("NEQ".into()),
        Rule::lt => Operator::Lt("LT".into()),
        Rule::gt => Operator::Gt("GT".into()),
        Rule::le => Operator::Le("LE".into()),
        Rule::ge => Operator::Ge("GE".into()),
        Rule::not => Operator::Not("NOT".into()),
        Rule::and => Operator::And("AND".into()),
        Rule::or => Operator::Or("OR".into()),
        _ => return None,
    };
    Some(operator)
}

// Helper: map a pest rule to its delimiter, or None for non-delimiter rules.
fn rule_to_delimiter(rule: Rule) -> Option<Delimiter> {
    let delimiter = match rule {
        Rule::l_paren => Delimiter::LParen,
        Rule::r_paren => Delimiter::RParen,
        Rule::l_brace => Delimiter::LBrace,
        Rule::r_brace => Delimiter::RBrace,
        Rule::l_bracket => Delimiter::LBracket,
        Rule::r_bracket => Delimiter::RBracket,
        Rule::comma => Delimiter::Comma,
        Rule::semicolon => Delimiter::Semicolon,
        _ => return None,
    };
    Some(delimiter)
}

// Parses an integer literal in decimal, octal (leading 0) or hexadecimal
// (0x/0X prefix) notation.
//
// Malformed or out-of-range literals silently yield 0 rather than an
// error: the pest grammar is expected to reject invalid literals before
// this function ever sees them.
pub fn parse_int(raw: &str) -> i64 {
    if let Some(hex_digits) = raw.strip_prefix("0x").or_else(|| raw.strip_prefix("0X")) {
        // Hexadecimal: parse the digits after the prefix.
        i64::from_str_radix(hex_digits, 16).unwrap_or(0)
    } else if raw.len() > 1 && raw.starts_with('0') {
        // Octal: leading 0 but not 0x/0X. from_str_radix tolerates the
        // leading zero, so the whole literal can be passed through.
        i64::from_str_radix(raw, 8).unwrap_or(0)
    } else {
        // Decimal (including the single digit "0").
        raw.parse().unwrap_or(0)
    }
}

/// Lexes the file at `filename` and returns its token stream.
///
/// On success with no lexical errors, every token is printed to stderr as
/// "`NAME lexeme at Line N.`". Lexical errors (unrecognized characters)
/// are printed as "`Error type A at Line N:message`" and suppress the
/// token listing, but the tokens are still returned.
///
/// Returns an error if the file cannot be read or pest rejects the input
/// entirely.
pub fn parse_file(filename: &str) -> Result<Vec<Token>, Box<dyn std::error::Error>> {
    let content = fs::read_to_string(filename)?;

    // Bail out early when the grammar cannot parse the input at all.
    let pairs = match ExpressionParser::parse(Rule::program, &content) {
        Ok(pairs) => pairs,
        Err(e) => {
            // Best effort: recover a line number from pest's error text.
            let error_msg = format!("{e}");
            let line_num = extract_line_number_from_error(&error_msg).unwrap_or(1);
            eprintln!("Error type A at Line {line_num}:Lexical analysis failed");
            return Err(Box::new(e));
        }
    };

    let mut tokens = Vec::new();
    let mut line_token_counts: HashMap<usize, usize> = HashMap::new();
    let mut has_errors = false;

    for pair in pairs {
        let (mut parsed, errors) = parse_tokens(pair, &mut line_token_counts);
        tokens.append(&mut parsed);
        has_errors = has_errors || !errors.is_empty();
        for error in errors {
            eprintln!("Error type A at Line {}:{}", error.line, error.message);
        }
    }

    // Tokens are only listed when the whole input scanned cleanly.
    if !has_errors {
        for token in &tokens {
            eprintln!(
                "{} {} at Line {}.",
                token.token_type.token_name(),
                token.token_type.lexeme(),
                token.line
            );
        }
    }

    Ok(tokens)
}

/// Recursively walks a pest pair, collecting recognized tokens and
/// lexical errors.
///
/// `line_token_counts` tracks how many tokens have been seen on each
/// source line so far, so each token gets a 1-based per-line index.
fn parse_tokens(
    pair: pest::iterators::Pair<Rule>,
    line_token_counts: &mut HashMap<usize, usize>,
) -> (Vec<Token>, Vec<LexicalError>) {
    let mut tokens = Vec::new();
    let mut errors = Vec::new();

    match pair.as_rule() {
        Rule::program_item => {
            let inner = pair.into_inner().next().unwrap();
            match inner.as_rule() {
                Rule::token => {
                    let (line, _col) = inner.line_col();
                    let token_inner = inner.into_inner().next().unwrap();
                    let lexeme = token_inner.as_str().to_string();

                    // 1-based token index within the line. The entry API
                    // performs a single map lookup (the original did a
                    // `get` followed by an `insert`).
                    let counter = line_token_counts.entry(line).or_insert(0);
                    *counter += 1;
                    let index = *counter;

                    let rule = token_inner.as_rule();

                    // Classify the inner rule; unknown rules produce no token.
                    let token_type = if let Some(keyword) = rule_to_keyword(rule) {
                        Some(TokenType::Keyword(keyword))
                    } else if let Some(operator) = rule_to_operator(rule) {
                        Some(TokenType::Operator(operator))
                    } else if let Some(delimiter) = rule_to_delimiter(rule) {
                        Some(TokenType::Delimiter(delimiter))
                    } else if rule == Rule::ident {
                        Some(TokenType::Identifier(lexeme))
                    } else if rule == Rule::integer_const {
                        Some(TokenType::IntConst(parse_int(&lexeme)))
                    } else {
                        None
                    };

                    if let Some(token_type) = token_type {
                        tokens.push(Token {
                            token_type,
                            line,
                            index,
                        });
                    }
                }
                Rule::error_char => {
                    // line_col() is equivalent to as_span().start_pos().line_col()
                    // and keeps position extraction consistent with the
                    // token branch above.
                    let (line, _col) = inner.line_col();
                    let lexeme = inner.as_str();

                    errors.push(LexicalError {
                        line,
                        message: format!("Unrecognized character '{lexeme}'"),
                    });
                }
                _ => {}
            }
        }
        _ => {
            // Not a leaf: recurse into children and accumulate results.
            for inner_pair in pair.into_inner() {
                let (mut child_tokens, mut child_errors) =
                    parse_tokens(inner_pair, line_token_counts);
                tokens.append(&mut child_tokens);
                errors.append(&mut child_errors);
            }
        }
    }

    (tokens, errors)
}

// Helper: extract a 1-based line number from an error message.
//
// Recognizes two shapes:
//   * "... line N ..."  — free-text messages mentioning a line number
//   * " --> N:C"        — pest's standard error Display format
// Returns None when neither pattern yields a number.
//
// Fixes two defects in the previous version: it cut the text after
// "line " at the next whitespace, so "line 5," parsed "5," and failed;
// and it never matched pest's actual " --> line:col" rendering at all.
fn extract_line_number_from_error(error_msg: &str) -> Option<usize> {
    // Parse the run of ASCII digits at the start of `s`.
    fn leading_number(s: &str) -> Option<usize> {
        let end = s.find(|c: char| !c.is_ascii_digit()).unwrap_or(s.len());
        s[..end].parse().ok()
    }

    if let Some(start) = error_msg.find("line ") {
        if let Some(n) = leading_number(&error_msg[start + 5..]) {
            return Some(n);
        }
    }
    if let Some(start) = error_msg.find("--> ") {
        return leading_number(&error_msg[start + 4..]);
    }
    None
}
