use std::{
    fs::File,
    io::{BufRead, BufReader, Read, Write},
    mem::{size_of, size_of_val},
};

use super::{invalid_char, LexerStage, Token, TokenType, KEYWORD_MAP};

/// Returns the first character of `s` without consuming it.
///
/// The original collected the whole string into a `Vec<char>` just to read
/// index 0 — an O(n) allocation per call, and this is called once per input
/// character. `chars().next()` reads only the first UTF-8 sequence.
/// Accepting `&str` (instead of `&String`) is strictly more general; existing
/// `now_char(&code_ptr)` call sites still compile via deref coercion.
///
/// # Panics
/// Panics if `s` is empty (same as the original).
fn now_char(s: &str) -> char {
    s.chars().next().expect("获取失败")
}

/// A whole-file lexer: `new` records the path, `lexical_analysis` reads the
/// file and turns it into a `Vec<Token>` by repeatedly consuming characters
/// off the front of the in-memory source string.
#[derive(Debug)]
pub struct Lexer {
    // _file_ptr: Box<Vec<char>>,
    // _line_no: i32,
    // _mark: Box<String>,
    // _index: i32,
    // Path of the source file to tokenize.
    // NOTE(review): `Box<String>` is redundant — `String` is already
    // heap-backed; a plain `String` (or `PathBuf`) would do. Left as-is
    // because `new` and `lexical_analysis` depend on this exact type.
    _input_file_path: Box<String>
}

impl Lexer {
    /// Removes the first character of `code_ptr` in place.
    ///
    /// The original `String::from(&code_ptr[1..])` had two problems:
    /// byte-slicing at index 1 panics when the first character is multi-byte
    /// UTF-8 (index 1 is then not a char boundary), and it allocated a brand
    /// new `String` for every character consumed. `drain` with the first
    /// char's exact UTF-8 length fixes both; an empty string becomes a no-op
    /// instead of a panic.
    fn _str_offset_one(code_ptr: &mut String) {
        if let Some(first) = code_ptr.chars().next() {
            code_ptr.drain(..first.len_utf8());
        }
    }
    /// Scans and returns the next token from the front of `code_ptr`,
    /// advancing `line_no` across any newlines it consumes.
    ///
    /// Implements a small DFA: starting in `Start`, each loop iteration hands
    /// the current character to the handler for the current state until the
    /// state machine reaches `Done` or the input is exhausted. If the input
    /// runs out first, the token type remains `EOF`.
    fn _next_token(code_ptr: &mut String, line_no: &mut i32) -> Token {
        use super::token::LexerStage::*;

        let mut lexer_stage = Start;
        let mut token_type = TokenType::EOF;
        let mut token_str = String::default();

        while lexer_stage != Done && !code_ptr.is_empty() {
            // Pick the per-state handler once; every handler shares the same
            // signature, so the arms coerce to a single fn pointer.
            let stage_handler = match lexer_stage {
                Start => Self::next_token_start_stage,
                InId => Self::next_token_in_id_stage,
                InNum => Self::next_token_in_num_stage,
                InDiv => Self::next_token_in_div_stage,
                InComment => Self::next_token_in_comment_stage,
                EndComment => Self::next_token_end_comment_stage,
                InLess => Self::next_token_in_less_stage,
                InGreater => Self::next_token_in_greater_stage,
                InAssign => Self::next_token_in_assign_stage,
                InNot => Self::next_token_in_not_stage,
                // Unreachable: the loop guard excludes `Done`.
                Done => break,
            };
            stage_handler(
                code_ptr,
                &mut lexer_stage,
                &mut token_type,
                &mut token_str,
                line_no,
            );
        }
        Token::new(token_type, token_str, *line_no)
    }
    #[allow(unused)]
    /// Reads the configured source file into memory and tokenizes it,
    /// returning every token up to (but not including) `EOF`.
    ///
    /// # Panics
    /// Panics if the file cannot be opened, read, or is not valid UTF-8.
    pub fn lexical_analysis(&self) -> Vec<Token> {
        // `expect` replaces the original is_err()/panic!/unwrap() dance while
        // keeping the same panic message on open failure.
        let f = File::open(&*self._input_file_path).expect("文件打开失败");
        let mut reader = BufReader::new(f);
        let mut buf = Vec::<u8>::new();
        reader.read_to_end(&mut buf).expect("read to end failed");
        let mut s = String::from_utf8(buf).expect("from u8 failed");

        let mut line_no = 1;

        let mut token_list = Vec::default();
        // Pull tokens until the lexer reports EOF (input exhausted).
        loop {
            let token_obj = Self::_next_token(&mut s, &mut line_no);
            if token_obj.token_type() == TokenType::EOF {
                break;
            }
            token_list.push(token_obj);
        }
        token_list
    }
    #[allow(unused)]
    /// Creates a lexer for the source file at `input_file_path`.
    /// The file is not opened until `lexical_analysis` is called.
    pub fn new<S: AsRef<str>>(input_file_path: S) -> Self {
        let path = input_file_path.as_ref().to_owned();
        Self {
            _input_file_path: Box::new(path),
        }
    }
    /// Dispatch method for the Start state.
    ///
    /// Looks at the first character of `code_ptr` and either: enters an
    /// intermediate state (letter → InId, digit → InNum, '/' → InDiv,
    /// '<' / '>' / '=' / '!' → their states), stays in Start discarding
    /// whitespace (bumping `line_no` on '\n'), finishes a single-character
    /// token immediately (`+ - * ; , ( ) [ ] { }`), or reports an invalid
    /// character via `invalid_char`.
    fn next_token_start_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        let now_char = now_char(&code_ptr);
        if now_char.is_ascii_alphabetic() {
            // A letter: the lexer enters the "reading an identifier" state.
            *lexer_stage = LexerStage::InId;
            token_str.push(now_char);
            Self::_str_offset_one(code_ptr);
        } else if now_char.is_ascii_digit() {
            // A digit: the lexer enters the "reading a number" state.
            *lexer_stage = LexerStage::InNum;
            token_str.push(now_char);
            Self::_str_offset_one(code_ptr);
        } else if now_char.is_whitespace() {
            // Whitespace: stay in the Start state and discard the character.
            // In particular, a newline bumps the current line number by 1.

            if now_char == '\n' {
                *line_no += 1;
            }
            Self::_str_offset_one(code_ptr);
        } else {
            // If the character is '/', we cannot yet tell whether it should be
            // saved; it is only saved once confirmed to be a division operator,
            // so `if_push` is set to false for that arm.
            // If the character is one of "+", "-", "*", ";", ",", "(", ")",
            // "[", "]", "{", "}" — single-character tokens — the token type is
            // known immediately and the lexer goes straight to the Done state.
            // If the character is "/", "<", ">", "=" or "!", the lexer enters
            // the corresponding intermediate state.
            // Anything else is reported as an invalid character.
            let mut if_push = true;
            match now_char {
                '+' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::Plus;
                }
                '-' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::Minus;
                }
                '*' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::Multiply;
                }
                '/' => {
                    *lexer_stage = LexerStage::InDiv;
                    if_push = false;
                }
                '<' => {
                    *lexer_stage = LexerStage::InLess;
                }
                '>' => {
                    *lexer_stage = LexerStage::InGreater;
                }
                '=' => {
                    *lexer_stage = LexerStage::InAssign;
                }
                '!' => {
                    *lexer_stage = LexerStage::InNot;
                }
                ';' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::Semicolon;
                }
                ',' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::Comma;
                }
                '(' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::LeftRoundBracket;
                }
                ')' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::RightRoundBracket;
                }
                '[' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::LeftSquareBracket;
                }
                ']' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::RightSquareBracket;
                }
                '{' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::LeftCurlyBracket;
                }
                '}' => {
                    *lexer_stage = LexerStage::Done;
                    *token_type = TokenType::RightCurlyBracket;
                }
                // '#' => {}
                // '%' => {}
                _ => invalid_char(now_char, *line_no),
            }
            if if_push {
                token_str.push(now_char);
            }
            Self::_str_offset_one(code_ptr);
        }
    }
    /// Dispatch method for the "reading an identifier" state.
    ///
    /// Keeps accumulating ASCII letters; on the first non-letter the token is
    /// complete and is classified as a keyword (via `KEYWORD_MAP`) or a plain
    /// identifier. The terminating character is left for the next token.
    fn next_token_in_id_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        let c = now_char(code_ptr);
        if c.is_ascii_alphabetic() {
            // Still inside the word: accumulate and advance.
            token_str.push(c);
            Self::_str_offset_one(code_ptr);
            return;
        }
        *lexer_stage = LexerStage::Done;
        *token_type = match KEYWORD_MAP.get(token_str.as_str()) {
            Some(&keyword) => keyword,
            None => TokenType::Id,
        };
    }
    /// Dispatch method for the "reading a number" state.
    ///
    /// Keeps accumulating ASCII digits; the first non-digit completes a
    /// `Number` token and is left in the input for the next token.
    fn next_token_in_num_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        let c = now_char(code_ptr);
        if !c.is_ascii_digit() {
            *lexer_stage = LexerStage::Done;
            *token_type = TokenType::Number;
            return;
        }
        token_str.push(c);
        Self::_str_offset_one(code_ptr);
    }
    /// Dispatch method for the "just saw '/'" state.
    ///
    /// A following '*' opens a comment ("/\*"); anything else means the '/'
    /// was a division operator, whose lexeme (deliberately not saved by the
    /// Start state) is restored here.
    fn next_token_in_div_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        match now_char(code_ptr) {
            '*' => {
                // "/*" starts a comment; consume the '*'.
                *lexer_stage = LexerStage::InComment;
                Self::_str_offset_one(code_ptr);
            }
            _ => {
                // Plain division; the current char belongs to the next token.
                *lexer_stage = LexerStage::Done;
                *token_type = TokenType::Divide;
                *token_str = String::from("/");
            }
        }
    }
    /// Dispatch method for the "inside a comment" state.
    ///
    /// Discards every character; a '*' moves to `EndComment` (a possible
    /// close), and newlines still advance the line counter.
    fn next_token_in_comment_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        match now_char(code_ptr) {
            '*' => *lexer_stage = LexerStage::EndComment,
            '\n' => *line_no += 1,
            _ => {}
        }
        Self::_str_offset_one(code_ptr);
    }
    /// Dispatch method for the "saw '*' inside a comment" state.
    ///
    /// '/' closes the comment ("*/") and returns to Start; another '*' stays
    /// here (it may still precede the closing '/'); anything else falls back
    /// into the comment body, counting newlines as usual.
    fn next_token_end_comment_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        match now_char(code_ptr) {
            '/' => *lexer_stage = LexerStage::Start,
            '*' => {}
            '\n' => {
                *lexer_stage = LexerStage::InComment;
                *line_no += 1;
            }
            _ => *lexer_stage = LexerStage::InComment,
        }
        Self::_str_offset_one(code_ptr);
    }
    /// String-literal state dispatch method (never implemented; kept below,
    /// commented out, for reference).
    // fn next_token_str_stage(
    //     
    //     now_char: char,
    //     lexer_stage: &mut LexerStage,
    //     token_type: &mut TokenType,
    //     token_str: &mut String,
    // ) {
    //     if now_char == '"' {
    //         *lexer_stage = LexerStage::Start;
    //     }
    // }
    /// Dispatch method for the "just saw '<'" state: `<=` vs `<`.
    fn next_token_in_less_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        // Either outcome completes the token.
        *lexer_stage = LexerStage::Done;
        let c = now_char(code_ptr);
        *token_type = if c == '=' {
            // "<=": the '=' is part of the lexeme — save and consume it.
            token_str.push(c);
            Self::_str_offset_one(code_ptr);
            TokenType::LessEq
        } else {
            // Lone '<': the current char belongs to the next token.
            TokenType::Less
        };
    }
    /// Dispatch method for the "just saw '>'" state: `>=` vs `>`.
    fn next_token_in_greater_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        // Either outcome completes the token.
        *lexer_stage = LexerStage::Done;
        let c = now_char(code_ptr);
        *token_type = if c == '=' {
            // ">=": the '=' is part of the lexeme — save and consume it.
            token_str.push(c);
            Self::_str_offset_one(code_ptr);
            TokenType::GreaterEq
        } else {
            // Lone '>': the current char belongs to the next token.
            TokenType::Greater
        };
    }
    /// Dispatch method for the "just saw '='" state: `==` vs assignment.
    fn next_token_in_assign_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        // Either outcome completes the token.
        *lexer_stage = LexerStage::Done;
        let c = now_char(code_ptr);
        *token_type = if c == '=' {
            // "==": the second '=' is part of the lexeme — save and consume it.
            token_str.push(c);
            Self::_str_offset_one(code_ptr);
            TokenType::Eq
        } else {
            // Single '=': an assignment; current char goes to the next token.
            TokenType::Assign
        };
    }
    /// Dispatch method for the "just saw '!'" state.
    ///
    /// Only "!=" is legal; a bare '!' is reported via `invalid_char`.
    fn next_token_in_not_stage(
        code_ptr: &mut String,
        lexer_stage: &mut LexerStage,
        token_type: &mut TokenType,
        token_str: &mut String,
        line_no: &mut i32,
    ) {
        let c = now_char(code_ptr);
        if c != '=' {
            // '!' with no following '=' is not a valid token here.
            invalid_char(c, *line_no);
            return;
        }
        *lexer_stage = LexerStage::Done;
        *token_type = TokenType::NotEq;
        token_str.push(c);
        Self::_str_offset_one(code_ptr);
    }
}

#[test]
pub fn test_this() {
    // NOTE(review): this test reads a machine-specific absolute path, so it
    // only passes on the original author's machine — consider bundling a
    // fixture file under the crate instead.
    // `mut` was removed: `lexical_analysis` takes `&self`, so the binding is
    // never mutated (fixes an `unused_mut` warning).
    let lexer = Lexer::new("C:\\Users\\ThinkPad\\Desktop\\b\\main.cpp");
    println!("{:#?}", lexer.lexical_analysis());
}
