use pest::Parser;
use pest_derive::Parser;

#[derive(Parser)]
#[grammar = "lexer.pest"] // points at the .pest grammar file; pest_derive generates `Rule` from it
struct SysYLexer;

#[derive(Debug, PartialEq)]
pub struct TokenInfo {
    // The classified token (keyword/operator/identifier/integer constant, ...).
    token: Token,
    // The exact source text matched for this token.
    token_str: String,
    // The pest grammar rule that matched (used by `Display` for the rule name).
    matched_rule: Rule,
    // 1-based line of the token's start position, as reported by pest's `line_col()`.
    line_index: usize,
    // 1-based column of the token's start position (stored but not printed by `Display`).
    col_index: usize,
}

impl std::fmt::Display for TokenInfo {
    /// Formats as `"<RULE> <text> at Line <line>."`.
    ///
    /// Integer constants print their parsed numeric value (so hex/octal
    /// literals display as decimal); every other token prints the original
    /// matched source text. Writes straight to the formatter instead of
    /// building intermediate `String`s with `format!`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.token {
            Token::IntegerConst(value) => {
                write!(f, "{:?} {} at Line {}.", self.matched_rule, value, self.line_index)
            }
            _ => write!(
                f,
                "{:?} {} at Line {}.",
                self.matched_rule, self.token_str, self.line_index
            ),
        }
    }
}

/// All lexical token kinds produced by the SysY lexer.
#[derive(Debug, PartialEq)]
pub enum Token {
    // Keywords
    Const,
    Int,
    Void,
    If,
    Else,
    While,
    Break,
    Continue,
    Return,
    // Arithmetic operators
    Plus,
    Minus,
    Mul,
    Div,
    Mod,
    // Assignment and comparison operators
    Assign,
    Eq,
    Neq,
    Lt,
    Gt,
    Le,
    Ge,
    // Logical operators
    Not,
    And,
    Or,
    // Delimiters
    LParen,
    RParen,
    LBrace,
    RBrace,
    LBracket,
    RBracket,
    Comma,
    Semicolon,
    // Identifier, carrying its source text.
    Ident(String),
    // Integer literal, already parsed to a value (decimal/octal/hex handled in `tokenize`).
    IntegerConst(i64),
    // The lexer should not emit these as final tokens, but they are kept for completeness.
    Whitespace(String),
    LineComment(String),
    MultilineComment(String),
    Unknown,
}

pub fn tokenize(input: &str) -> Vec<TokenInfo> {
    // 使用 `program` 规则作为词法分析器的入口
    let pairs = SysYLexer::parse(Rule::program, input)
        .unwrap_or_else(|e| panic!("Parse error: {}", e))
        .next()
        .unwrap();


    let mut error = false;
    let mut token_infos: Vec<TokenInfo> = vec![];

    for p in pairs.into_inner() {
        let (line_idx, col_idx) = p.as_span().start_pos().line_col();
        let token = match p.as_rule() {
            Rule::CONST => Token::Const,
            Rule::INT => Token::Int,
            Rule::VOID => Token::Void,
            Rule::IF => Token::If,
            Rule::ELSE => Token::Else,
            Rule::WHILE => Token::While,
            Rule::BREAK => Token::Break,
            Rule::CONTINUE => Token::Continue,
            Rule::RETURN => Token::Return,
            Rule::PLUS => Token::Plus,
            Rule::MINUS => Token::Minus,
            Rule::MUL => Token::Mul,
            Rule::DIV => Token::Div,
            Rule::MOD => Token::Mod,
            Rule::ASSIGN => Token::Assign,
            Rule::EQ => Token::Eq,
            Rule::NEQ => Token::Neq,
            Rule::LT => Token::Lt,
            Rule::GT => Token::Gt,
            Rule::LE => Token::Le,
            Rule::GE => Token::Ge,
            Rule::NOT => Token::Not,
            Rule::AND => Token::And,
            Rule::OR => Token::Or,
            Rule::L_PAREN => Token::LParen,
            Rule::R_PAREN => Token::RParen,
            Rule::L_BRACE => Token::LBrace,
            Rule::R_BRACE => Token::RBrace,
            Rule::L_BRACKT => Token::LBracket,
            Rule::R_BRACKT => Token::RBracket,
            Rule::COMMA => Token::Comma,
            Rule::SEMICOLON => Token::Semicolon,
            Rule::IDENT => Token::Ident(p.as_str().to_string()),
            Rule::INTEGER_CONST =>  {
                //TODO 优化: 明明在pest里已经对不同进制的数字做了区分，为什么还要在这里再区分一遍, 没必要做重复的工作
                if p.as_str().starts_with("0x") || p.as_str().starts_with("0X") {
                    Token::IntegerConst(i64::from_str_radix(&p.as_str()[2..], 16).expect(format!("Error type A at Line {}: Invalid Hex Number {}.", line_idx, p.as_str()).as_str()))
                } else if p.as_str().starts_with("0") {
                    Token::IntegerConst(i64::from_str_radix(&p.as_str()[0..], 8).expect(format!("Error type A at Line {}: Invalid Octal Number {}.", line_idx, p.as_str()).as_str()))
                } else {
                    Token::IntegerConst(p.as_str().parse().expect(format!("Error type A at Line {}: Invalid Number {}.", line_idx, p.as_str()).as_str()))
                }
            }
            Rule::LINE_COMMENT | 
            Rule::MULTILINE_COMMENT => continue,
            Rule::EOI => continue,
            Rule::UNKNOWN_TOKEN => {
                eprintln!("Error type A at Line {}: Mysterious character {}.", line_idx, p.as_str());
                error = true;

                continue;
            },
            _ => unreachable!(),
        };

        token_infos.push(TokenInfo {
            token: token,
            token_str: p.as_str().to_string(),
            matched_rule: p.as_rule(),
            line_index: line_idx,
            col_index: col_idx,
        });

    }

    if error {
        vec![]
    } else {
        token_infos
    }
    
}
