//! Lexical analyzer
//!
//! A nom-based lexer that converts input text into a stream of tokens.

use nom::{
    branch::alt,
    bytes::complete::{tag, take_while1, take_until},
    character::complete::{alpha1, alphanumeric1, char, digit1, multispace0, multispace1},
    combinator::{map, opt, recognize},
    multi::{many0, many1},
    sequence::{delimited, pair, preceded, terminated},
    IResult,
};
use serde::{Deserialize, Serialize};

/// The kinds of tokens produced by the lexer.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum Token {
    // Keywords
    Var,
    Let,
    Const,
    If,
    Else,
    For,
    While,
    Function,
    Return,
    Break,
    Continue,
    True,
    False,
    Null,
    
    // Identifiers and literals (each carries its parsed payload)
    Identifier(String),
    Integer(i64),
    Float(f64),
    String(String),
    
    // Operators
    Plus,
    Minus,
    Multiply,
    Divide,
    Modulo,
    Power,
    Assign,
    Equal,
    NotEqual,
    Less,
    LessEqual,
    Greater,
    GreaterEqual,
    And,
    Or,
    Not,
    
    // Delimiters
    LeftParen,
    RightParen,
    LeftBrace,
    RightBrace,
    LeftBracket,
    RightBracket,
    Comma,
    Semicolon,
    Dot,
    Colon,
    
    // Special markers
    Newline,
    Eof,
    Comment(String),
}

/// Lexer: turns a source string into a `Vec<Token>` via `tokenize`.
pub struct Lexer {
    input: String,      // full source text to tokenize
    position: usize,    // NOTE(review): never read or updated in this file — candidate for removal
    tokens: Vec<Token>, // tokens accumulated by `tokenize`
}

impl Lexer {
    /// Create a new lexer over the given source text.
    pub fn new(input: String) -> Self {
        Self {
            input,
            position: 0,
            tokens: Vec::new(),
        }
    }
    
    /// Run lexical analysis over the whole input.
    ///
    /// Returns the complete token stream, always terminated by `Token::Eof`,
    /// or a human-readable error message for the first unrecognizable input.
    pub fn tokenize(&mut self) -> Result<Vec<Token>, String> {
        // Start from a clean slate so a second call does not append a second
        // token stream onto the first one's output.
        self.tokens.clear();
        
        let mut remaining = self.input.as_str();
        
        while !remaining.is_empty() {
            // Skip whitespace. NOTE(review): `multispace0` also consumes
            // '\n', so the `Token::Newline` delimiter below can never be
            // produced — confirm whether newline tokens are actually needed.
            let (rest, _): (&str, &str) = multispace0::<&str, nom::error::Error<&str>>(remaining).map_err(|e| format!("解析空白字符失败: {}", e))?;
            remaining = rest;
            
            if remaining.is_empty() {
                break;
            }
            
            // Lex the next token off the front of the input.
            let (rest, token) = self.parse_token(remaining)?;
            remaining = rest;
            
            // Comments are recognized but never emitted.
            if !matches!(token, Token::Comment(_)) {
                self.tokens.push(token);
            }
        }
        
        self.tokens.push(Token::Eof);
        
        Ok(self.tokens.clone())
    }
    
    /// Parse a single token from the front of `input`.
    ///
    /// Order matters: comments must be tried before operators (so "//" is
    /// not lexed as two divisions). `parse_keyword` already falls back to
    /// `Token::Identifier` for non-keyword words, so the `parse_identifier`
    /// alternative below is effectively unreachable; it is kept as a
    /// harmless safety net.
    fn parse_token<'a>(&self, input: &'a str) -> Result<(&'a str, Token), String> {
        alt((
            |i| self.parse_comment(i),
            |i| self.parse_keyword(i),
            |i| self.parse_identifier(i),
            |i| self.parse_number(i),
            |i| self.parse_string(i),
            |i| self.parse_operator(i),
            |i| self.parse_delimiter(i),
        ))(input)
        .map_err(|e| format!("解析token失败: {}", e))
    }
    
    /// Parse a line comment: "//" followed by everything up to the newline.
    fn parse_comment<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        map(
            preceded(tag("//"), nom::bytes::complete::take_while(|c| c != '\n')),
            |comment: &str| Token::Comment(comment.to_string())
        )(input)
    }
    
    /// Parse a word (letter or '_' followed by letters, digits, '_') and
    /// classify it: known keywords get their own variant, anything else
    /// becomes `Token::Identifier`.
    fn parse_keyword<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        let (input, word) = recognize(pair(
            alt((alpha1, tag("_"))),
            many0(alt((alphanumeric1, tag("_"))))
        ))(input)?;
        
        let token = match word {
            "var" => Token::Var,
            "let" => Token::Let,
            "const" => Token::Const,
            "if" => Token::If,
            "else" => Token::Else,
            "for" => Token::For,
            "while" => Token::While,
            "function" => Token::Function,
            "return" => Token::Return,
            "break" => Token::Break,
            "continue" => Token::Continue,
            "true" => Token::True,
            "false" => Token::False,
            "null" => Token::Null,
            _ => Token::Identifier(word.to_string()),
        };
        
        Ok((input, token))
    }
    
    /// Parse an identifier (same word shape as `parse_keyword`).
    fn parse_identifier<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        map(
            recognize(pair(
                alt((alpha1, tag("_"))),
                many0(alt((alphanumeric1, tag("_"))))
            )),
            |id: &str| Token::Identifier(id.to_string())
        )(input)
    }
    
    /// Parse a numeric literal. Floats are tried first so "3.14" is not
    /// split into Integer(3), Dot, Integer(14).
    fn parse_number<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        alt((
            |i| self.parse_float(i),
            |i| self.parse_integer(i),
        ))(input)
    }
    
    /// Parse an integer literal.
    fn parse_integer<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        // `map_res` makes the parser fail on i64 overflow instead of
        // silently turning out-of-range literals into Integer(0).
        nom::combinator::map_res(digit1, |digits: &str| {
            digits.parse::<i64>().map(Token::Integer)
        })(input)
    }
    
    /// Parse a float literal of the form digits '.' digits.
    fn parse_float<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        map(
            recognize(pair(
                digit1,
                pair(char('.'), digit1)
            )),
            |float_str: &str| {
                // A "digits.digits" string always parses as f64, so the
                // 0.0 fallback is unreachable in practice.
                let value = float_str.parse::<f64>().unwrap_or(0.0);
                Token::Float(value)
            }
        )(input)
    }
    
    /// Parse a double-quoted string literal. Escape sequences are not
    /// supported: the body is everything up to the next '"'.
    fn parse_string<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        map(
            delimited(
                char('"'),
                // take_while (zero or more) instead of take_while1 so the
                // empty literal "" lexes successfully.
                nom::bytes::complete::take_while(|c| c != '"'),
                char('"')
            ),
            |s: &str| Token::String(s.to_string())
        )(input)
    }
    
    /// Parse an operator. Multi-character operators must be tried before
    /// their single-character prefixes ("==" before "=", "**" before "*").
    fn parse_operator<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        alt((
            map(tag("=="), |_| Token::Equal),
            map(tag("!="), |_| Token::NotEqual),
            map(tag("<="), |_| Token::LessEqual),
            map(tag(">="), |_| Token::GreaterEqual),
            map(tag("&&"), |_| Token::And),
            map(tag("||"), |_| Token::Or),
            map(tag("**"), |_| Token::Power),
            map(tag("+"), |_| Token::Plus),
            map(tag("-"), |_| Token::Minus),
            map(tag("*"), |_| Token::Multiply),
            map(tag("/"), |_| Token::Divide),
            map(tag("%"), |_| Token::Modulo),
            map(tag("="), |_| Token::Assign),
            map(tag("<"), |_| Token::Less),
            map(tag(">"), |_| Token::Greater),
            map(tag("!"), |_| Token::Not),
        ))(input)
    }
    
    /// Parse a single-character delimiter.
    fn parse_delimiter<'a>(&self, input: &'a str) -> IResult<&'a str, Token> {
        alt((
            map(char('('), |_| Token::LeftParen),
            map(char(')'), |_| Token::RightParen),
            map(char('{'), |_| Token::LeftBrace),
            map(char('}'), |_| Token::RightBrace),
            map(char('['), |_| Token::LeftBracket),
            map(char(']'), |_| Token::RightBracket),
            map(char(','), |_| Token::Comma),
            map(char(';'), |_| Token::Semicolon),
            map(char('.'), |_| Token::Dot),
            map(char(':'), |_| Token::Colon),
            map(char('\n'), |_| Token::Newline),
        ))(input)
    }
    
    /// Tokens produced by the most recent call to `tokenize`.
    pub fn tokens(&self) -> &[Token] {
        &self.tokens
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    
    /// Convenience helper: lex a source snippet and return its token stream.
    fn lex(src: &str) -> Vec<Token> {
        Lexer::new(src.to_string()).tokenize().unwrap()
    }
    
    #[test]
    fn test_tokenize_simple_expression() {
        let expected = vec![
            Token::Var,
            Token::Identifier("x".to_string()),
            Token::Assign,
            Token::Integer(42),
            Token::Semicolon,
            Token::Eof,
        ];
        assert_eq!(lex("var x = 42;"), expected);
    }
    
    #[test]
    fn test_tokenize_keywords() {
        let expected = [
            Token::If,
            Token::Else,
            Token::For,
            Token::While,
            Token::Function,
            Token::Return,
        ];
        let tokens = lex("if else for while function return");
        assert_eq!(&tokens[..expected.len()], &expected);
    }
    
    #[test]
    fn test_tokenize_operators() {
        let expected = [
            Token::Plus,
            Token::Minus,
            Token::Multiply,
            Token::Divide,
            Token::Equal,
            Token::NotEqual,
            Token::LessEqual,
            Token::GreaterEqual,
        ];
        let tokens = lex("+ - * / == != <= >=");
        assert_eq!(&tokens[..expected.len()], &expected);
    }
    
    #[test]
    fn test_tokenize_numbers() {
        let tokens = lex("42 3.14");
        assert_eq!(&tokens[..2], &[Token::Integer(42), Token::Float(3.14)]);
    }
    
    #[test]
    fn test_tokenize_strings() {
        let tokens = lex(r#""hello world""#);
        assert_eq!(tokens[0], Token::String("hello world".to_string()));
    }
    
    #[test]
    fn test_tokenize_comments() {
        // Trailing comments must be dropped from the token stream.
        let expected = vec![
            Token::Var,
            Token::Identifier("x".to_string()),
            Token::Assign,
            Token::Integer(42),
            Token::Semicolon,
            Token::Eof,
        ];
        assert_eq!(lex("var x = 42; // this is a comment"), expected);
    }
}
