use std::{
    fs::File,
    io::{BufRead, BufReader, Read},
};

use serde::Serialize;

use crate::lexer::keyword::is_keyword;

pub mod keyword;

#[derive(Debug, Clone, PartialEq)]
pub struct LexerOutputItem {
    pub token: Token,
    pub loc: TokenLocation,
}

/**
 * A token is the smallest unit the parser can understand
 */
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum Token {
    // Lexer use
    /// A reserved word (accepted by `keyword::is_keyword`), stored verbatim.
    Keyword(String),
    /// An unsigned integer literal, kept as the raw source text
    /// (any word starting with an ASCII digit — see `classify_token`).
    UIntLiteral(String),
    /// One of the six bracket characters; see `BraceType`.
    Brace(BraceType),
    /// The `;` statement terminator.
    Semicolon,
    /// A word that is neither a keyword nor a literal, stored verbatim.
    Identifier(String),
    /// An operator character; see `OpType`.
    Operation(OpType),
    /// End-of-file marker appended by `lex_file`.
    Eof,
    /// A character the lexer could not recognize, stored as text.
    Unknown(String),
}

/**
 * Position and extent of a token within the source file.
 */
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct TokenLocation {
    /// 1-based line number within the file.
    pub row: usize,
    /// 1-based column of the token's first character
    /// (the synthetic `Eof` token uses column 0).
    pub column: usize,
    /// Length in bytes of the source text the token covers; equal to the
    /// char count here since only ASCII characters are buffered by the lexer.
    pub len: usize,
}

/**
 * The six bracket characters recognized by the lexer
 * (mapped from chars by `char_to_bracetype`).
 */
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum BraceType {
    OpenCurly,   // '{'
    CloseCurly,  // '}'
    OpenSquare,  // '['
    CloseSquare, // ']'
    OpenRound,   // '('
    CloseRound,  // ')'
}

/**
 * Operator tokens. There is no separate `Sub` variant: every `-` is lexed
 * as `Negation`, and `maybe_unary_op` lets later stages tell which
 * operators can act as unary — presumably the parser disambiguates
 * subtraction from negation (TODO confirm against the parser).
 */
#[derive(Debug, Clone, PartialEq, Serialize)]
pub enum OpType {
    Negation,          // '-'
    BitwiseComplement, // '~'
    LogicalNegation,   // '!'
    Add,               // '+'
    Mul,               // '*'
    Div                // '/'
}


/**
 * Errors produced by the lexing pipeline.
 */
#[derive(Debug, Clone, PartialEq)]
pub enum LexerError {
    /// An empty string was handed to `split_token` / `classify_token`.
    Empty,
    /// `read_line` failed while reading the source in `lex_file`.
    CannotReadLine,
    /// The word is not a keyword; internal to `classify_token`'s
    /// keyword-then-identifier fallback chain, so callers of the pipeline
    /// normally never observe it.
    NotAKeyword,
    /// NOTE(review): never constructed in this file — presumably reserved
    /// for identifier validation elsewhere; verify before relying on it.
    NotAIdentifier,
    /// A character that is neither alphanumeric, an operator, `;`, nor a
    /// bracket (from `char_to_bracetype`).
    UnknownChar,
    /// NOTE(review): never constructed in this file.
    UnknownToken,
}

// Helper structs: intermediate results passed between lexing stages.
/// A classified token plus the length (in bytes) of the source text it
/// covers — equal to the char count, since only ASCII chars are buffered.
pub struct SplitTokenItem(Token, usize);
/// A `SplitTokenItem` plus its 1-based column within the line.
pub struct LexLineItem(SplitTokenItem, usize);

/**
 * Lex 一个文件，并计算各 Token 在文件中的位置，在最后添加 EOF
 */
pub fn lex_file(program: File) -> Result<Vec<LexerOutputItem>, LexerError> {
    let mut items: Vec<LexerOutputItem> = vec![];
    let mut fs_in = BufReader::new(program);
    let mut row = 1;
    let mut buf = String::new();

    loop {
        buf.clear();
        match fs_in.read_line(&mut buf) {
            Ok(0) => break,
            Ok(_) => (),
            Err(_) => return Err(LexerError::CannotReadLine),
        };

        match lex_line(&buf) {
            Ok(lltokens) => {
                for lltoken in lltokens {
                    let stoken = lltoken.0;
                    let col = lltoken.1;

                    // row 和 col 从 1 开始
                    items.push(LexerOutputItem {
                        token: stoken.0,
                        loc: TokenLocation {
                            row: row,
                            column: col,
                            len: stoken.1,
                        },
                    });
                }
            }
            Err(e) => {
                println!("Error occurs when process Line {} because {:?}.", row, e);
            }
        }

        row += 1;
    }

    // Append eof:
    items.push(LexerOutputItem {
        token: Token::Eof,
        loc: TokenLocation {
            row: row,
            column: 0,
            len: 1,
        },
    });

    Ok(items)
}

/**
 *  Lex 一行 String，并计算各 Token 的列号
 */
/**
 * Lexes one line of text, computing each token's 1-based column.
 *
 * Fixes an off-by-one: `start_col` previously started at 1, so the first
 * token of every line was reported at column 2 (columns were correct only
 * after the first whitespace). It now starts at 0, acting as a virtual
 * separator just before the line, so the first token lands on column 1.
 *
 * Errors from `split_token` are now propagated to the caller via `?`
 * instead of panicking through `unwrap`; `lex_file` already handles the
 * `Err` branch by reporting and skipping the line.
 */
pub fn lex_line(text: &str) -> Result<Vec<LexLineItem>, LexerError> {
    let mut tokens: Vec<LexLineItem> = vec![];
    // 1-based column of the character about to be read.
    let mut col = 1;
    // Column of the whitespace separator just before the current word;
    // 0 = virtual separator in front of the line.
    let mut start_col = 0;
    let mut buf = String::new();

    /// Splits the buffered word into tokens and records each with its column.
    fn process_buffer(
        tokens: &mut Vec<LexLineItem>,
        buf: &str,
        start_col: usize,
    ) -> Result<(), LexerError> {
        match split_token(buf) {
            Ok(stokens) => {
                // Offset of the current sub-token within the word.
                let mut inner_col = 0;

                for stoken in stokens {
                    let len = stoken.1;
                    // First char of this token sits right after the separator.
                    tokens.push(LexLineItem(stoken, start_col + inner_col + 1));
                    inner_col += len;
                }

                Ok(())
            }
            Err(e) => {
                println!(
                    "In lex fn : split_token failed when deal with word {} : {:?} .",
                    buf, e
                );

                Err(e)
            }
        }
    }

    for c in text.chars() {
        if c.is_whitespace() {
            // Whitespace terminates the current word, if any.
            if !buf.is_empty() {
                process_buffer(&mut tokens, &buf, start_col)?;
                buf.clear();
            }

            start_col = col;
        } else {
            buf.push(c);
        }

        col += 1;
    }

    // The line may end without trailing whitespace (e.g. no final newline).
    if !buf.is_empty() {
        process_buffer(&mut tokens, &buf, start_col)?;
    }

    Ok(tokens)
}

/**
 *  在排除 whitespace 的情况下，分离那些无 whitespace 相连的 Tokens
 *  如 {return{p0xF2F[7]};
 */
pub fn split_token(s: &str) -> Result<Vec<SplitTokenItem>, LexerError> {
    if s.is_empty() {
        return Err(LexerError::Empty);
    }

    let mut ret: Vec<SplitTokenItem> = vec![];
    let chars: Vec<char> = s.chars().collect();
    let mut t: Token = Token::Unknown(String::new());
    let mut buf: String = String::new();

    fn process_buffer(buf: &String, ret: &mut Vec<SplitTokenItem>) -> Result<(), LexerError> {
        match classify_token(&buf) {
            Ok(ctoken) => {
                ret.push(SplitTokenItem(ctoken, buf.len()));
                Ok(())
            }
            Err(e) => {
                println!("In split token, classify_token failed {} : {:?}. ", buf, e);
                Err(e)
            }
        }
    }

    for c in chars {
        let put_token: bool;
        let len: usize;

        if c.is_ascii_alphanumeric() || c == '_' {
            buf.push(c);
            len = 0;
            put_token = false;
        } else if c == ';' {
            t = Token::Semicolon;
            len = 1;
            put_token = true;
        } else if c == '-' {
            t = Token::Operation(OpType::Negation);
            len = 1;
            put_token = true;
        } else if c == '!' {
            t = Token::Operation(OpType::LogicalNegation);
            len = 1;
            put_token = true;
        } else if c == '~' {
            t = Token::Operation(OpType::BitwiseComplement);
            len = 1;
            put_token = true;
        } else if c == '+' {
            t = Token::Operation(OpType::Add);
            len = 1;
            put_token = true;
        } else if c == '*' {
            t = Token::Operation(OpType::Mul);
            len = 1;
            put_token = true;
        } else if c == '/' {
            t = Token::Operation(OpType::Div);
            len = 1;
            put_token = true;
        } else {
            match char_to_bracetype(c) {
                Ok(bt) => {
                    t = Token::Brace(bt);
                    len = 1;
                    put_token = true;
                }
                Err(e) => {
                    println!("In split token, char_to_bracetype failed {} : {:?}. ", c, e);
                    t = Token::Unknown(c.to_string());
                    len = 1;
                    put_token = true;
                }
            };
        }

        if put_token {
            if !buf.is_empty() {
                process_buffer(&buf, &mut ret).unwrap();
            }

            ret.push(SplitTokenItem(t, len));
            t = Token::Unknown(String::new());
            buf.clear();
        }
    }

    if !buf.is_empty() {
        process_buffer(&buf, &mut ret).unwrap();
    }

    Ok(ret)
}

/**
 * Maps a single bracket character to its `BraceType`; any other character
 * yields `LexerError::UnknownChar`.
 */
pub fn char_to_bracetype(c: char) -> Result<BraceType, LexerError> {
    let bt = match c {
        '{' => BraceType::OpenCurly,
        '}' => BraceType::CloseCurly,
        '[' => BraceType::OpenSquare,
        ']' => BraceType::CloseSquare,
        '(' => BraceType::OpenRound,
        ')' => BraceType::CloseRound,
        _ => return Err(LexerError::UnknownChar),
    };

    Ok(bt)
}

/**
 * Classifies a word produced by `split_token`.
 *
 * A word starting with an ASCII digit becomes `UIntLiteral` verbatim (note:
 * a malformed word like `1abc` is therefore also taken as a literal — the
 * lexer does not validate digits here); otherwise the word is a `Keyword`
 * if `is_keyword` accepts it, else an `Identifier`.
 *
 * Returns `LexerError::Empty` for an empty input.
 *
 * The previous `is_empty` check followed by `chars().nth(0).unwrap()` is
 * folded into a single `match` on `chars().next()`, removing the `unwrap`.
 */
pub fn classify_token(s: &str) -> Result<Token, LexerError> {
    match s.chars().next() {
        None => Err(LexerError::Empty),
        Some(first) if first.is_ascii_digit() => Ok(Token::UIntLiteral(s.to_string())),
        Some(_) => lex_to_keyword(s).or_else(|_| lex_to_identifier(s)),
    }
}

/**
 * Wraps `s` in `Token::Keyword` when `is_keyword` accepts it; otherwise
 * fails with `LexerError::NotAKeyword` so `classify_token` can fall back
 * to an identifier.
 */
pub fn lex_to_keyword(s: &str) -> Result<Token, LexerError> {
    if !is_keyword(s) {
        return Err(LexerError::NotAKeyword);
    }

    Ok(Token::Keyword(s.to_string()))
}

/**
 * Wraps `s` in `Token::Identifier`. Infallible by construction, but keeps
 * the `Result` return type so it composes with `lex_to_keyword` in
 * `classify_token`'s `or_else` chain.
 */
pub fn lex_to_identifier(s: &str) -> Result<Token, LexerError> {
    Ok(Token::Identifier(String::from(s)))
}

/**
 * Returns true when `t` is an operation that can act as a unary operator
 * (`-`, `~`, `!`); all other tokens (and binary-only operations) yield
 * false.
 *
 * The nested `if let` + bool-returning `match` is collapsed into the
 * idiomatic `matches!` macro (clippy `match_like_matches_macro`).
 */
pub fn maybe_unary_op(t: &Token) -> bool {
    matches!(
        t,
        Token::Operation(OpType::Negation | OpType::BitwiseComplement | OpType::LogicalNegation)
    )
}