#![allow(non_camel_case_types)]

use pest::Parser;
use pest_derive::Parser;

/// Lexical tokens produced by `tokenize`. Every variant except
/// `INTEGER_CONST` carries the matched lexeme as a `String`; the
/// integer variant carries the already-decoded value.
#[derive(Debug, PartialEq, Clone)]
pub enum TOKEN {
    // Integer literal, decoded from its decimal/octal/hex spelling.
    INTEGER_CONST(i64),
    // Identifier (names of variables, functions, ...).
    IDENT(String),
    // --- keywords ---
    CONST(String),
    INT(String),
    VOID(String),
    IF(String),
    ELSE(String),
    WHILE(String),
    BREAK(String),
    CONTINUE(String),
    RETURN(String),
    // --- arithmetic operators ---
    PLUS(String),
    MINUS(String),
    MUL(String),
    DIV(String),
    MOD(String),
    // --- comparison and assignment operators ---
    EQ(String),
    NEQ(String),
    ASSIGN(String),
    LE(String),
    GE(String),
    LT(String),
    GT(String),
    // --- logical operators ---
    NOT(String),
    AND(String),
    OR(String),
    // --- punctuation ---
    COMMA(String),
    SEMICOLON(String),
    L_PAREN(String),
    R_PAREN(String),
    L_BRACE(String),
    R_BRACE(String),
    L_BRACKT(String),
    R_BRACKT(String),
    // Lexical error; carries the offending text / error message.
    Error(String),
}
use std::fmt;

impl fmt::Display for TOKEN {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TOKEN::INTEGER_CONST(value) => write!(f, "INTEGER_CONST {}", value),
            TOKEN::IDENT(text) => write!(f, "IDENT {}", text),
            TOKEN::CONST(text) => write!(f, "CONST {}", text),
            TOKEN::INT(text) => write!(f, "INT {}", text),
            TOKEN::VOID(text) => write!(f, "VOID {}", text),
            TOKEN::IF(text) => write!(f, "IF {}", text),
            TOKEN::ELSE(text) => write!(f, "ELSE {}", text),
            TOKEN::WHILE(text) => write!(f, "WHILE {}", text),
            TOKEN::BREAK(text) => write!(f, "BREAK {}", text),
            TOKEN::CONTINUE(text) => write!(f, "CONTINUE {}", text),
            TOKEN::RETURN(text) => write!(f, "RETURN {}", text),
            TOKEN::PLUS(text) => write!(f, "PLUS {}", text),
            TOKEN::MINUS(text) => write!(f, "MINUS {}", text),
            TOKEN::MUL(text) => write!(f, "MUL {}", text),
            TOKEN::DIV(text) => write!(f, "DIV {}", text),
            TOKEN::MOD(text) => write!(f, "MOD {}", text),
            TOKEN::EQ(text) => write!(f, "EQ {}", text),
            TOKEN::NEQ(text) => write!(f, "NEQ {}", text),
            TOKEN::ASSIGN(text) => write!(f, "ASSIGN {}", text),
            TOKEN::LE(text) => write!(f, "LE {}", text),
            TOKEN::GE(text) => write!(f, "GE {}", text),
            TOKEN::LT(text) => write!(f, "LT {}", text),
            TOKEN::GT(text) => write!(f, "GT {}", text),
            TOKEN::NOT(text) => write!(f, "NOT {}", text),
            TOKEN::AND(text) => write!(f, "AND {}", text),
            TOKEN::OR(text) => write!(f, "OR {}", text),
            TOKEN::COMMA(text) => write!(f, "COMMA {}", text),
            TOKEN::SEMICOLON(text) => write!(f, "SEMICOLON {}", text),
            TOKEN::L_PAREN(text) => write!(f, "L_PAREN {}", text),
            TOKEN::R_PAREN(text) => write!(f, "R_PAREN {}", text),
            TOKEN::L_BRACE(text) => write!(f, "L_BRACE {}", text),
            TOKEN::R_BRACE(text) => write!(f, "R_BRACE {}", text),
            TOKEN::L_BRACKT(text) => write!(f, "L_BRACKT {}", text),
            TOKEN::R_BRACKT(text) => write!(f, "R_BRACKT {}", text),
            TOKEN::Error(text) => write!(f, "{}", text),
        }
    }
}


/// Pest-generated lexer. The token rules live in `lexer.pest`; the
/// derive macro also generates the `Rule` enum matched in `tokenize`.
#[derive(Parser)]
#[grammar = "src/lexer_parser/lexer.pest"] // path is resolved relative to the crate root at compile time
pub struct LexerParser;

// And update all the rule matches:
pub fn tokenize(input: &str) -> Result<Vec<(TOKEN, usize)>, Vec<(TOKEN,usize)> >{
    let mut tokens = Vec::new();
    let mut errors = Vec::new();
    let mut is_error = false;

    match LexerParser::parse(Rule::file, input) {
        Ok(pairs) => {
                        for pair in pairs {
                            for inner in pair.into_inner() {
                                let line = inner.line_col().0;
                                match inner.as_rule() {
                                    Rule::INTEGER_CONST => {
                                        let text = inner.as_str();
                                        let value = if text.starts_with("0x") || text.starts_with("0X") {
                                            i64::from_str_radix(&text[2..], 16).unwrap()
                                        } else if text.starts_with('0') && text.len() > 1 {
                                            i64::from_str_radix(&text[1..], 8).unwrap()
                                        } else {
                                            text.parse::<i64>().unwrap()
                                        };
                                        tokens.push((TOKEN::INTEGER_CONST(value), line));
                                    }
                                    Rule::ERROR => {
                                        is_error=true;
                                        let text=inner.as_str();
                                        errors.push((TOKEN::Error(text.to_string()), line));
                                    }
        
                                    // 对于简单的规则，可以直接用 match 简化
                                    rule => {
                                        let text = inner.as_str();
                                        let token = match rule {
                                            Rule::IDENT => TOKEN::IDENT(text.to_string()),
                                            Rule::CONST => TOKEN::CONST(text.to_string()),
                                            Rule::INT => TOKEN::INT(text.to_string()),
                                            Rule::VOID => TOKEN::VOID(text.to_string()),
                                            Rule::IF => TOKEN::IF(text.to_string()),
                                            Rule::ELSE => TOKEN::ELSE(text.to_string()),
                                            Rule::WHILE => TOKEN::WHILE(text.to_string()),
                                            Rule::BREAK => TOKEN::BREAK(text.to_string()),
                                            Rule::CONTINUE => TOKEN::CONTINUE(text.to_string()),
                                            Rule::RETURN => TOKEN::RETURN(text.to_string()),
                                            Rule::PLUS => TOKEN::PLUS(text.to_string()),
                                            Rule::MINUS => TOKEN::MINUS(text.to_string()),
                                            Rule::MUL => TOKEN::MUL(text.to_string()),
                                            Rule::DIV => TOKEN::DIV(text.to_string()),
                                            Rule::MOD => TOKEN::MOD(text.to_string()),
                                            Rule::EQ => TOKEN::EQ(text.to_string()),
                                            Rule::NEQ => TOKEN::NEQ(text.to_string()),
                                            Rule::ASSIGN => TOKEN::ASSIGN(text.to_string()),
                                            Rule::LE => TOKEN::LE(text.to_string()),
                                            Rule::GE => TOKEN::GE(text.to_string()),
                                            Rule::LT => TOKEN::LT(text.to_string()),
                                            Rule::GT => TOKEN::GT(text.to_string()),
                                            Rule::NOT => TOKEN::NOT(text.to_string()),
                                            Rule::AND => TOKEN::AND(text.to_string()),
                                            Rule::OR => TOKEN::OR(text.to_string()),
                                            Rule::COMMA => TOKEN::COMMA(text.to_string()),
                                            Rule::SEMICOLON => TOKEN::SEMICOLON(text.to_string()),
                                            Rule::L_PAREN => TOKEN::L_PAREN(text.to_string()),
                                            Rule::R_PAREN => TOKEN::R_PAREN(text.to_string()),
                                            Rule::L_BRACE => TOKEN::L_BRACE(text.to_string()),
                                            Rule::R_BRACE => TOKEN::R_BRACE(text.to_string()),
                                            Rule::L_BRACKT => TOKEN::L_BRACKT(text.to_string()),
                                            Rule::R_BRACKT => TOKEN::R_BRACKT(text.to_string()),
                                            _ => continue, // 忽略不需要处理的规则（如WHITESPACE, COMMENT等）
                                        };
                                        tokens.push((token, line));
                                    }
                                }
                            }
                        }
            }
        Err(_) => todo!(),
    }
    if is_error {
        Err(errors)
    } else {
        Ok(tokens)
    }
}
