use pest::Parser;
use pest_derive::Parser;
use std::fmt;
use std::ops::Range;

/// Pest-generated lexer. The token grammar lives in `lex.pest`; the derive
/// expands it into the `Rule` enum and the `Parser::parse` implementation
/// used by `tokenize` below.
#[derive(Parser)]
#[grammar = "lex.pest"]
pub struct LexParser;

/// All tokens lexed from a single source file, in source order.
#[derive(Debug, Clone)]
pub struct TokenStream<'a> {
    // Tokens in source order; `tokenize` emits an `Eof` token for pest's
    // `EOI` rule, normally as the last element.
    pub tokens: Vec<Token<'a>>,
    // Name/path of the file the tokens came from (borrowed from the caller).
    pub file: &'a str,
}

impl fmt::Display for TokenStream<'_> {
    /// Renders one `NAME value at Line N.` line per token. If any `Invalid`
    /// token was seen, the error report replaces the token listing entirely.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Decode an integer literal: 0x/0X hex, 0b/0B binary, 0o/0O octal,
        // C-style leading-zero octal, otherwise decimal.
        //
        // BUG FIX: the original checked the bare leading-`0` (octal) prefix
        // BEFORE `0b`/`0B`, so binary literals fell into the octal branch,
        // failed the octal-digit test, and panicked inside `str::parse`.
        // Binary is now tested before the legacy-octal fallback.
        fn decode_int(v: &str) -> u64 {
            if let Some(hex) = v.strip_prefix("0x").or_else(|| v.strip_prefix("0X")) {
                u64::from_str_radix(hex, 16).expect("lexer guarantees hex digits")
            } else if let Some(bin) = v.strip_prefix("0b").or_else(|| v.strip_prefix("0B")) {
                u64::from_str_radix(bin, 2).expect("lexer guarantees binary digits")
            } else if let Some(oct) = v.strip_prefix("0o").or_else(|| v.strip_prefix("0O")) {
                u64::from_str_radix(oct, 8).expect("lexer guarantees octal digits")
            } else if v.len() > 1
                && v.starts_with('0')
                && v[1..].chars().all(|c| ('0'..='7').contains(&c))
            {
                // C-style octal: a leading zero followed only by octal digits.
                u64::from_str_radix(&v[1..], 8).expect("lexer guarantees octal digits")
            } else {
                // Plain decimal (also covers oddities like "08", as before).
                v.parse::<u64>().expect("lexer guarantees a decimal literal")
            }
        }

        let mut ret = String::new();
        let mut err = String::new();
        for token in &self.tokens {
            let line = token.span.line;
            match &token.kind {
                // Keywords print their own upper-cased lexeme as the tag.
                TokenKind::KeyWord => ret.push_str(&format!(
                    "{} {} at Line {}.",
                    token.value.to_uppercase(),
                    token.value,
                    line
                )),
                TokenKind::IntegerConst => ret.push_str(&format!(
                    "INTEGER_CONST {} at Line {}.",
                    decode_int(token.value),
                    line
                )),
                // Invalid tokens are reported on the error channel only.
                TokenKind::Invalid => err.push_str(&format!(
                    "Error type A at Line {}: Mysterious character \"{}\".\n",
                    line, token.value
                )),
                // EOF prints nothing; drop the newline the previous entry
                // added. `pop` is panic-free where `remove(len - 1)` was not.
                TokenKind::Eof => {
                    ret.pop();
                    err.pop();
                }
                // Every other kind has a fixed tag.
                other => {
                    let tag = match other {
                        TokenKind::Ident => "IDENT",
                        TokenKind::String => "STRING",
                        TokenKind::Plus => "PLUS",
                        TokenKind::Minus => "MINUS",
                        TokenKind::Div => "DIV",
                        TokenKind::Mul => "MUL",
                        TokenKind::Mod => "MOD",
                        TokenKind::Assign => "ASSIGN",
                        TokenKind::Eq => "EQ",
                        TokenKind::Neq => "NEQ",
                        TokenKind::Lt => "LT",
                        TokenKind::Gt => "GT",
                        TokenKind::Le => "LE",
                        TokenKind::Ge => "GE",
                        TokenKind::Not => "NOT",
                        TokenKind::And => "AND",
                        TokenKind::Or => "OR",
                        TokenKind::OpenParen => "L_PAREN",
                        TokenKind::CloseParen => "R_PAREN",
                        TokenKind::OpenBrace => "L_BRACE",
                        TokenKind::CloseBrace => "R_BRACE",
                        TokenKind::OpenBracket => "L_BRACKT",
                        TokenKind::CloseBracket => "R_BRACKT",
                        TokenKind::Comma => "COMMA",
                        TokenKind::Semicolon => "SEMICOLON",
                        // Handled by the outer arms above.
                        TokenKind::KeyWord
                        | TokenKind::IntegerConst
                        | TokenKind::Invalid
                        | TokenKind::Eof => unreachable!(),
                    };
                    ret.push_str(&format!("{} {} at Line {}.", tag, token.value, line));
                }
            }
            // One output line per token, matching the original's
            // trailing-newline bookkeeping exactly.
            ret.push('\n');
        }

        if !err.is_empty() {
            // Any lexing error suppresses the token listing. (The original
            // also `println!`ed the errors here — a side effect a `Display`
            // impl must not have; it duplicated output whenever the stream
            // was printed. The caller decides where the text goes.)
            return write!(f, "{err}");
        }
        ret.pop(); // strip the final newline; safe even for an empty stream
        write!(f, "{ret}")
    }
}

// Opt out of Send/Sync: the lexer is explicitly single-threaded.
// (Negative impls require the nightly `negative_impls` feature.)
impl !Send for TokenStream<'_> {}
impl !Sync for TokenStream<'_> {}

/// A single lexed token: its kind, source location, and the (whitespace-
/// trimmed) slice of the input it covers.
#[derive(Debug, Clone)]
pub struct Token<'a> {
    // Syntactic category (see `TokenKind`).
    pub kind: TokenKind,
    // Line/column and byte range of the lexeme in the source text.
    pub span: Span,
    // The lexeme itself, borrowed from the input string.
    pub value: &'a str,
}

// Explicitly single-threaded: tokens borrow the source text and are not
// meant to cross threads (nightly `negative_impls`).
impl !Send for Token<'_> {}
impl !Sync for Token<'_> {}

/// Syntactic category of a token produced by `tokenize`.
///
/// `Invalid` marks input the grammar could not match (reported as a
/// "mysterious character"); `Eof` corresponds to pest's end-of-input rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TokenKind {
    // One of: const, int, void, if, else, while, break, continue, return.
    KeyWord,
    Ident,
    Invalid,
    String,
    IntegerConst,
    // Arithmetic, comparison, and logical operators.
    Plus,
    Minus,
    Div,
    Mul,
    Mod,
    Assign,
    Eq,
    Neq,
    Lt,
    Gt,
    Le,
    Ge,
    Not,
    And,
    Or,
    // Delimiters and punctuation.
    OpenParen,
    CloseParen,
    OpenBrace,
    CloseBrace,
    OpenBracket,
    CloseBracket,
    Comma,
    Semicolon,
    // End-of-input marker (pest `EOI`).
    Eof,
}

/// Source location of a lexeme: 1-based line/column plus the byte range it
/// occupies in the input text.
#[derive(Debug, Clone)]
pub struct Span {
    // Line number, counted from 1.
    line: usize,
    // Column number, counted from 1.
    col: usize,
    // Half-open byte range [start, end) into the source text.
    range: Range<usize>,
}

impl Span {
    /// Builds a span from a 1-based line/column pair and a byte range.
    pub fn new(line: usize, col: usize, start: usize, end: usize) -> Self {
        Span {
            line,
            col,
            range: start..end,
        }
    }
}

// Builds a `Token` of the given `TokenKind` variant (`$kind`) from a pest
// pair (`$token`). `trim_space` strips whitespace that pest captured around
// the lexeme, and the span's byte range is narrowed by the same leading and
// trailing amounts so it still points exactly at the trimmed text.
// Note: `$span` is mutated and then moved into the returned token.
macro_rules! gen_token {
    ($span:expr, $token:expr, $kind:tt) => {{
        let (no_white_space, start, end) = trim_space($token.as_str());
        $span.range.start += start;
        $span.range.end -= end;
        Token {
            kind: TokenKind::$kind,
            span: $span,
            value: no_white_space,
        }
    }};
}

/// Lexes `input` (the contents of `file`) into a `TokenStream`.
///
/// # Panics
/// Panics when pest cannot parse the input; exits the process when the
/// grammar yields a rule with no `TokenKind` mapping (a lexer bug).
pub fn tokenize<'a>(file: &'a str, input: &'a str) -> TokenStream<'a> {
    // BUG FIX: `expect` takes a plain string, so the original message
    // printed "{file}" literally instead of the file name, and dropped the
    // pest error entirely. Build the message explicitly.
    let tokens = LexParser::parse(Rule::PROGRAM, input)
        .unwrap_or_else(|e| panic!("parse {file} failed: {e}"));
    let mut token_vec = Vec::new();

    for token in tokens {
        // pest reports 1-based line/column; span start/end are byte offsets.
        let (line, col) = token.line_col();
        let span = token.as_span();
        let (start, end) = (span.start(), span.end());
        let mut span = Span::new(line, col, start, end);
        let t = match token.as_rule() {
            Rule::EOI => gen_token!(span, token, Eof),
            // All keyword rules collapse into a single `KeyWord` kind; the
            // concrete keyword is recovered later from the token's text.
            Rule::CONST
            | Rule::INT
            | Rule::VOID
            | Rule::IF
            | Rule::ELSE
            | Rule::WHILE
            | Rule::BREAK
            | Rule::CONTINUE
            | Rule::RETURN => gen_token!(span, token, KeyWord),
            Rule::PLUS => gen_token!(span, token, Plus),
            Rule::MINUS => gen_token!(span, token, Minus),
            Rule::DIV => gen_token!(span, token, Div),
            Rule::MUL => gen_token!(span, token, Mul),
            Rule::MOD => gen_token!(span, token, Mod),
            Rule::ASSIGN => gen_token!(span, token, Assign),
            Rule::EQ => gen_token!(span, token, Eq),
            Rule::NEQ => gen_token!(span, token, Neq),
            Rule::LT => gen_token!(span, token, Lt),
            Rule::GT => gen_token!(span, token, Gt),
            Rule::LE => gen_token!(span, token, Le),
            Rule::GE => gen_token!(span, token, Ge),
            Rule::NOT => gen_token!(span, token, Not),
            Rule::AND => gen_token!(span, token, And),
            Rule::OR => gen_token!(span, token, Or),
            Rule::L_PAREN => gen_token!(span, token, OpenParen),
            Rule::R_PAREN => gen_token!(span, token, CloseParen),
            Rule::L_BRACE => gen_token!(span, token, OpenBrace),
            Rule::R_BRACE => gen_token!(span, token, CloseBrace),
            Rule::L_BRACKET => gen_token!(span, token, OpenBracket),
            Rule::R_BRACKET => gen_token!(span, token, CloseBracket),
            Rule::COMMA => gen_token!(span, token, Comma),
            Rule::SEMICOLON => gen_token!(span, token, Semicolon),
            Rule::IDENT => gen_token!(span, token, Ident),
            Rule::INTEGER_CONST => gen_token!(span, token, IntegerConst),
            Rule::STRING => gen_token!(span, token, String),
            Rule::A => gen_token!(span, token, Invalid),
            _ => {
                // A rule with no mapping means the grammar and this match
                // are out of sync — bail out loudly.
                eprintln!("unknown token: {:?}", token);
                std::process::exit(1);
            }
        };
        token_vec.push(t);
    }
    TokenStream {
        tokens: token_vec,
        file,
    }
}

/// Trims surrounding whitespace and reports how much was removed.
///
/// Returns `(trimmed, leading, trailing)`, where `leading`/`trailing` are
/// the byte counts stripped from each end. The trailing count is measured
/// after the leading trim, so an all-whitespace input reports all of it as
/// leading (e.g. `"   "` → `("", 3, 0)`).
fn trim_space(input: &str) -> (&str, usize, usize) {
    let without_leading = input.trim_start();
    let core = without_leading.trim_end();
    (
        core,
        input.len() - without_leading.len(),
        without_leading.len() - core.len(),
    )
}

#[cfg(test)]
mod tests {
    use crate::lex;
    use std::collections::HashMap;
    use std::fs;
    use std::path::Path;

    /// Golden-file test: lexes every `tests/*.in` file and compares the
    /// rendered token stream against the matching `tests/*.out` file.
    #[test]
    fn test_lexer() {
        // Resolve `tests/` against the crate root so the test does not
        // depend on the working directory. (BUG FIX: the original computed
        // `current_dir` and never used it, reading "tests" relative to cwd.)
        let current_dir = Path::new(std::env!("CARGO_MANIFEST_DIR"));
        let result = fs::read_dir(current_dir.join("tests"))
            .unwrap()
            .map(|e| e.unwrap().path())
            .collect::<Vec<_>>();
        // Skip extensionless entries instead of panicking on them.
        let data_in = result
            .iter()
            .filter(|p| p.extension().map_or(false, |e| e == "in"));
        let data_out = result
            .iter()
            .filter(|p| p.extension().map_or(false, |e| e == "out"));

        // Index golden files by file prefix ("foo" for "foo.out").
        let mut data_out_map = HashMap::new();
        for out in data_out {
            data_out_map.insert(out.file_prefix().unwrap().to_str().unwrap(), out);
        }

        for din in data_in {
            let input = fs::read_to_string(din).expect("Failed to read file");
            let file_name = din.to_str().unwrap();
            let tokens = lex::tokenize(file_name, &input);
            println!(
                "---------------- file_name: {}打印token--------------------------",
                file_name
            );
            println!("{}", tokens);
            println!("---------------- 比较token--------------------------");

            let standard = fs::read_to_string(
                data_out_map
                    .get(din.file_prefix().unwrap().to_str().unwrap())
                    .expect("no matching .out golden file"),
            )
            .unwrap();
            let compare = tokens.to_string();
            println!(
                "standard: len: {}\n compare:len: {}",
                standard.len(),
                compare.len()
            );

            // BUG FIX: the original `return`ed on a mismatch, so a wrong
            // lexer output still PASSED the test — assert instead. Also walk
            // chars rather than `&s[i..i + 1]` byte slices, which panic on
            // multi-byte UTF-8 in the files.
            let mut line = 1;
            for (i, (c, s)) in compare.chars().zip(standard.chars()).enumerate() {
                if s == '\n' {
                    line += 1;
                }
                assert_eq!(
                    c, s,
                    "{}: first mismatch at char {} (line {})",
                    file_name, i, line
                );
            }
            assert_eq!(
                compare.len(),
                standard.len(),
                "{}: output length differs",
                file_name
            );
            println!("完全相等");
        }

        println!("----------测试完毕------------------");
    }
}
