// Lexer — no dependency on std
const cz_std = @import("cz_std.zig");
const array_list = @import("array_list.zig");
const ArrayList = array_list.ArrayList;
const Allocator = cz_std.Allocator;

/// All token kinds produced by `tokenize`. Member order is part of the
/// enum's numeric encoding — do not reorder.
pub const TokenType = enum {
    // Keywords
    keyword_import,
    keyword_pub,
    keyword_fn,
    keyword_const,
    keyword_return,
    keyword_try,
    keyword_struct,
    keyword_enum,
    keyword_union,
    keyword_actor,
    keyword_receive,
    keyword_match,
    keyword_case,
    keyword_if,
    keyword_else,
    keyword_while,
    keyword_for,
    keyword_break,
    keyword_continue,
    keyword_defer,
    keyword_unreachable,
    keyword_comptime,
    keyword_export,
    keyword_extern,
    keyword_inline,
    keyword_noalias,
    keyword_nosuspend,
    keyword_override,
    keyword_suspend,
    keyword_async,
    keyword_await,
    keyword_resume,
    keyword_anytype,
    keyword_anyframe,
    keyword_asm,
    keyword_callconv,
    keyword_var,
    keyword_usingnamespace,
    keyword_test,
    keyword_threadlocal,
    keyword_allowzero,
    keyword_nullable,
    
    // Identifiers and literals
    identifier,
    integer_literal,
    float_literal,
    string_literal,
    char_literal,
    
    // Operators
    plus,
    minus,
    star,
    slash,
    percent,
    equal,
    equal_equal,
    bang,
    bang_equal,
    less,
    less_equal,
    greater,
    greater_equal,
    ampersand,
    pipe,
    caret,
    tilde,
    less_less,
    greater_greater,
    plus_plus,
    minus_minus,
    plus_equal,
    minus_equal,
    star_equal,
    slash_equal,
    percent_equal,
    ampersand_equal,
    pipe_equal,
    caret_equal,
    less_less_equal,
    greater_greater_equal,
    dot,
    dot_dot,
    dot_dot_dot,
    arrow, // -> — NOTE(review): declared but verify the tokenizer emits it
    fat_arrow, // =>
    question,
    ellipsis, // NOTE(review): appears to duplicate dot_dot_dot — confirm which one the parser expects
    
    // Delimiters
    lparen,        // (
    rparen,        // )
    lbrace,        // {
    rbrace,        // }
    lbracket,      // [
    rbracket,      // ]
    semicolon,     // ;
    comma,         // ,
    colon,         // :
    hash,          // #
    at,            // @
    backslash,     // \
    eof,
};

/// A single lexical token. `lexeme` is a slice into the original source
/// buffer (no copy is made), so it is only valid while that buffer lives.
pub const Token = struct {
    type: TokenType,
    lexeme: []const u8, // raw source text of the token
    line: usize, // 1-based line of the token's first character
    column: usize, // 1-based column of the token's first character
};

/// Cursor over a source buffer with line/column tracking.
/// `tokenize` drives this; all reads are bounds-guarded via `isAtEnd`.
const Lexer = struct {
    source: []const u8, // full input; returned lexemes slice into it
    position: usize, // byte offset of the next unread character
    line: usize, // 1-based current line
    column: usize, // 1-based current column
    allocator: Allocator, // stored but not used by the scanner itself

    /// Create a lexer positioned at the start of `source`.
    fn init(allocator: Allocator, source: []const u8) Lexer {
        return Lexer{
            .source = source,
            .position = 0,
            .line = 1,
            .column = 1,
            .allocator = allocator,
        };
    }

    fn isAtEnd(self: *const Lexer) bool {
        return self.position >= self.source.len;
    }

    /// Consume and return the current character, updating line/column.
    /// Caller must ensure `!isAtEnd()` first — this does not bounds-check.
    fn advance(self: *Lexer) u8 {
        const char = self.source[self.position];
        self.position += 1;

        if (char == '\n') {
            self.line += 1;
            self.column = 1;
        } else {
            self.column += 1;
        }

        return char;
    }

    /// Look at the current character without consuming it (0 at EOF).
    fn peek(self: *const Lexer) u8 {
        if (self.isAtEnd()) return 0;
        return self.source[self.position];
    }

    /// Look one character past the current one (0 if out of range).
    fn peekNext(self: *const Lexer) u8 {
        if (self.position + 1 >= self.source.len) return 0;
        return self.source[self.position + 1];
    }

    /// Consume the current character only if it equals `expected`.
    /// Delegates to advance() so line/column bookkeeping lives in one place
    /// (the previous copy duplicated it here).
    fn match(self: *Lexer, expected: u8) bool {
        if (self.isAtEnd()) return false;
        if (self.source[self.position] != expected) return false;
        _ = self.advance();
        return true;
    }

    /// Skip spaces, tabs, CR/LF, line comments (`// …`) and block comments
    /// (`/* … */`; not nested, unterminated blocks run to EOF).
    fn skipWhitespace(self: *Lexer) void {
        while (!self.isAtEnd()) {
            switch (self.peek()) {
                ' ', '\r', '\t', '\n' => {
                    _ = self.advance();
                },
                '/' => {
                    if (self.peekNext() == '/') {
                        // Line comment: consume up to (not past) the newline,
                        // so the newline itself is handled on the next pass.
                        while (!self.isAtEnd() and self.peek() != '\n') {
                            _ = self.advance();
                        }
                    } else if (self.peekNext() == '*') {
                        // Block comment.
                        _ = self.advance(); // '/'
                        _ = self.advance(); // '*'
                        while (!self.isAtEnd() and !(self.peek() == '*' and self.peekNext() == '/')) {
                            _ = self.advance();
                        }
                        if (!self.isAtEnd()) {
                            _ = self.advance(); // '*'
                            _ = self.advance(); // '/'
                        }
                    } else {
                        return; // a real '/' token; let the caller scan it
                    }
                },
                else => return,
            }
        }
    }

    /// Read an identifier starting at the current position.
    /// Returns a slice of the source (caller rolled back to the first char).
    fn readIdentifier(self: *Lexer) []const u8 {
        const start = self.position;
        while (!self.isAtEnd() and isAlphaNumeric(self.peek())) {
            _ = self.advance();
        }
        return self.source[start..self.position];
    }

    /// Read an integer or decimal literal starting at the current position.
    /// A '.' is consumed only when followed by a digit, so `1.foo` lexes as
    /// integer `1` then `.` then `foo`.
    fn readNumber(self: *Lexer) []const u8 {
        const start = self.position;

        // Integer part.
        while (!self.isAtEnd() and isDigit(self.peek())) {
            _ = self.advance();
        }

        // Fractional part, only for "<digits>.<digit>...".
        if (!self.isAtEnd() and self.peek() == '.' and
            self.position + 1 < self.source.len and
            isDigit(self.source[self.position + 1]))
        {
            _ = self.advance(); // '.'

            while (!self.isAtEnd() and isDigit(self.peek())) {
                _ = self.advance();
            }
        }

        return self.source[start..self.position];
    }

    /// Read a string literal body. The opening '"' has ALREADY been consumed
    /// by the caller (tokenize's advance); the previous version advanced
    /// again here, silently dropping the first character of every string.
    /// Returns the raw contents (escapes are NOT decoded) without quotes.
    /// Unterminated strings simply end at EOF.
    fn readString(self: *Lexer) ![]const u8 {
        const start = self.position;
        while (!self.isAtEnd() and self.peek() != '"') {
            if (self.peek() == '\\') {
                _ = self.advance(); // skip the backslash
                // Guard: a lone trailing '\' at EOF previously caused an
                // out-of-bounds read on the unconditional advance below.
                if (self.isAtEnd()) break;
            }
            _ = self.advance(); // escaped char or ordinary char
        }

        const result = self.source[start..self.position];

        if (!self.isAtEnd() and self.peek() == '"') {
            _ = self.advance(); // closing quote
        }

        return result;
    }
};

/// True for ASCII letters and '_' (identifier start characters).
fn isAlpha(c: u8) bool {
    return switch (c) {
        'a'...'z', 'A'...'Z', '_' => true,
        else => false,
    };
}

/// True for ASCII decimal digits.
fn isDigit(c: u8) bool {
    return switch (c) {
        '0'...'9' => true,
        else => false,
    };
}

/// True for identifier continuation characters: letters, digits, '_'.
/// (Equivalent to isAlpha(c) or isDigit(c), written as one range switch.)
fn isAlphaNumeric(c: u8) bool {
    return switch (c) {
        'a'...'z', 'A'...'Z', '0'...'9', '_' => true,
        else => false,
    };
}

/// Map an identifier lexeme to its keyword token type, or `.identifier`
/// when it is not a reserved word.
fn getKeywordType(lexeme: []const u8) TokenType {
    const KeywordEntry = struct {
        text: []const u8,
        token: TokenType,
    };
    // Keyword table; linear scan via cz_std.eql, same as the original chain.
    const keyword_table = [_]KeywordEntry{
        .{ .text = "import", .token = .keyword_import },
        .{ .text = "pub", .token = .keyword_pub },
        .{ .text = "fn", .token = .keyword_fn },
        .{ .text = "const", .token = .keyword_const },
        .{ .text = "return", .token = .keyword_return },
        .{ .text = "try", .token = .keyword_try },
        .{ .text = "struct", .token = .keyword_struct },
        .{ .text = "enum", .token = .keyword_enum },
        .{ .text = "union", .token = .keyword_union },
        .{ .text = "actor", .token = .keyword_actor },
        .{ .text = "receive", .token = .keyword_receive },
        .{ .text = "match", .token = .keyword_match },
        .{ .text = "case", .token = .keyword_case },
        .{ .text = "if", .token = .keyword_if },
        .{ .text = "else", .token = .keyword_else },
        .{ .text = "while", .token = .keyword_while },
        .{ .text = "for", .token = .keyword_for },
        .{ .text = "break", .token = .keyword_break },
        .{ .text = "continue", .token = .keyword_continue },
        .{ .text = "defer", .token = .keyword_defer },
        .{ .text = "unreachable", .token = .keyword_unreachable },
        .{ .text = "comptime", .token = .keyword_comptime },
        .{ .text = "export", .token = .keyword_export },
        .{ .text = "extern", .token = .keyword_extern },
        .{ .text = "inline", .token = .keyword_inline },
        .{ .text = "noalias", .token = .keyword_noalias },
        .{ .text = "nosuspend", .token = .keyword_nosuspend },
        .{ .text = "override", .token = .keyword_override },
        .{ .text = "suspend", .token = .keyword_suspend },
        .{ .text = "async", .token = .keyword_async },
        .{ .text = "await", .token = .keyword_await },
        .{ .text = "resume", .token = .keyword_resume },
        .{ .text = "anytype", .token = .keyword_anytype },
        .{ .text = "anyframe", .token = .keyword_anyframe },
        .{ .text = "asm", .token = .keyword_asm },
        .{ .text = "callconv", .token = .keyword_callconv },
        .{ .text = "var", .token = .keyword_var },
        .{ .text = "usingnamespace", .token = .keyword_usingnamespace },
        .{ .text = "test", .token = .keyword_test },
        .{ .text = "threadlocal", .token = .keyword_threadlocal },
        .{ .text = "allowzero", .token = .keyword_allowzero },
        .{ .text = "nullable", .token = .keyword_nullable },
    };

    for (keyword_table) |entry| {
        if (cz_std.eql(lexeme, entry.text)) return entry.token;
    }
    return .identifier;
}

/// Scan `source` into a heap-allocated token slice, always terminated by
/// an `.eof` token. Caller owns the returned slice (allocated with
/// `allocator`). Lexemes are slices into `source`, not copies.
/// Unknown characters are skipped silently.
pub fn tokenize(allocator: Allocator, source: []const u8) ![]Token {
    var lexer = Lexer.init(allocator, source);
    // Accumulate into our own ArrayList (no std dependency).
    var tokens = ArrayList(Token).init(allocator);
    defer tokens.deinit();

    while (!lexer.isAtEnd()) {
        lexer.skipWhitespace();

        if (lexer.isAtEnd()) break;

        const start_pos = lexer.position;
        const start_line = lexer.line;
        const start_column = lexer.column;
        const char = lexer.advance();

        const token_type: TokenType = switch (char) {
            // Single-character tokens.
            '(' => .lparen,
            ')' => .rparen,
            '{' => .lbrace,
            '}' => .rbrace,
            '[' => .lbracket,
            ']' => .rbracket,
            ';' => .semicolon,
            ',' => .comma,
            ':' => .colon,
            '#' => .hash,
            '@' => .at,
            '\\' => .backslash,
            '~' => .tilde,
            '?' => .question,

            // Multi-character operators, longest match first.
            // `..`/`...`, `->`, `<<=`, `>>=` were declared in TokenType but
            // never emitted before; they are now recognized.
            '.' => if (lexer.match('.'))
                (if (lexer.match('.')) .dot_dot_dot else .dot_dot)
            else
                .dot,
            '+' => if (lexer.match('=')) .plus_equal else if (lexer.match('+')) .plus_plus else .plus,
            '-' => if (lexer.match('=')) .minus_equal else if (lexer.match('-')) .minus_minus else if (lexer.match('>')) .arrow else .minus,
            '*' => if (lexer.match('=')) .star_equal else .star,
            '/' => if (lexer.match('=')) .slash_equal else .slash,
            '%' => if (lexer.match('=')) .percent_equal else .percent,
            '!' => if (lexer.match('=')) .bang_equal else .bang,
            '=' => if (lexer.match('=')) .equal_equal else if (lexer.match('>')) .fat_arrow else .equal,
            '<' => if (lexer.match('<'))
                (if (lexer.match('=')) .less_less_equal else .less_less)
            else if (lexer.match('='))
                .less_equal
            else
                .less,
            '>' => if (lexer.match('>'))
                (if (lexer.match('=')) .greater_greater_equal else .greater_greater)
            else if (lexer.match('='))
                .greater_equal
            else
                .greater,
            '&' => if (lexer.match('=')) .ampersand_equal else .ampersand,
            '|' => if (lexer.match('=')) .pipe_equal else .pipe,
            '^' => if (lexer.match('=')) .caret_equal else .caret,

            '"' => {
                // String literal; the opening quote was consumed by advance()
                // above, readString scans the body and the closing quote.
                const string_content = try lexer.readString();
                try tokens.append(.{
                    .type = .string_literal,
                    .lexeme = string_content,
                    .line = start_line,
                    .column = start_column,
                });
                continue;
            },

            '\'' => {
                // Character literal; the lexeme keeps the raw (possibly
                // escaped) body, without the surrounding quotes.
                const char_content = if (!lexer.isAtEnd() and lexer.peek() != '\'') blk: {
                    const c = lexer.advance();
                    if (c == '\\' and !lexer.isAtEnd()) {
                        _ = lexer.advance(); // the escaped character
                    }
                    break :blk lexer.source[start_pos + 1 .. lexer.position];
                } else "";

                if (!lexer.isAtEnd() and lexer.peek() == '\'') {
                    _ = lexer.advance(); // closing quote
                }

                try tokens.append(.{
                    .type = .char_literal,
                    .lexeme = char_content,
                    .line = start_line,
                    .column = start_column,
                });
                continue;
            },

            else => {
                if (isAlpha(char)) {
                    // Roll back so readIdentifier sees the first character.
                    // `column` must be rolled back together with `position`
                    // (the old code forgot this, shifting every subsequent
                    // token on the line one column to the right).
                    lexer.position -= 1;
                    lexer.column -= 1;
                    const identifier = lexer.readIdentifier();
                    try tokens.append(.{
                        .type = getKeywordType(identifier),
                        .lexeme = identifier,
                        .line = start_line,
                        .column = start_column,
                    });
                    continue;
                } else if (isDigit(char)) {
                    lexer.position -= 1;
                    lexer.column -= 1; // keep column in sync with position
                    const number = lexer.readNumber();
                    // A '.' inside the literal means float. (Renamed from
                    // `token_type`, which shadowed the outer declaration.)
                    const literal_type: TokenType = if (cz_std.indexOf(number, ".") != null)
                        .float_literal
                    else
                        .integer_literal;
                    try tokens.append(.{
                        .type = literal_type,
                        .lexeme = number,
                        .line = start_line,
                        .column = start_column,
                    });
                    continue;
                } else {
                    // Unknown character: skipped silently for now.
                    continue;
                }
            },
        };

        // Operator/delimiter path; every other arm `continue`s above, so
        // the old `char != '"'` guard here was dead code.
        try tokens.append(.{
            .type = token_type,
            .lexeme = lexer.source[start_pos..lexer.position],
            .line = start_line,
            .column = start_column,
        });
    }

    // Terminating EOF token.
    try tokens.append(.{
        .type = .eof,
        .lexeme = "",
        .line = lexer.line,
        .column = lexer.column,
    });

    return try tokens.toOwnedSlice();
}