//
// Created by 86135 on 2025-06-06.
//

#ifndef COMPILER_CCV2_TEST_H
#define COMPILER_CCV2_TEST_H
#include "lexer.h"
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

// Lexer test driver
void testLexer(const char* filePath) {
    // 打开文件
    FILE* file = fopen(filePath, "r");
    if (file == NULL) {
        perror("Failed to open file");
        return;
    }

    // 获取文件大小
    fseek(file, 0, SEEK_END);
    long fileSize = ftell(file);
    fseek(file, 0, SEEK_SET);

    // 分配内存来存储文件内容
    char* source = (char*)malloc(fileSize + 1);
    if (source == NULL) {
        perror("Failed to allocate memory");
        fclose(file);
        return;
    }

    // 读取文件内容
    size_t bytesRead = fread(source, 1, fileSize, file);
    source[bytesRead] = '\0';

    // 关闭文件
    fclose(file);

    // 初始化词法分析器
    initLexer(source);

    // 开始词法分析
    Token token;
    do {
        // 获取下一个 Token
        token = lexerIdentifier();

        // 打印 Token 信息
        const char* tokenTypeName;
        switch (token.type) {
            // 单字符 Token
            case TOKEN_LEFT_PAREN: tokenTypeName = "TOKEN_LEFT_PAREN"; break;
            case TOKEN_RIGHT_PAREN: tokenTypeName = "TOKEN_RIGHT_PAREN"; break;
            case TOKEN_LEFT_BRACKET: tokenTypeName = "TOKEN_LEFT_BRACKET"; break;
            case TOKEN_RIGHT_BRACKET: tokenTypeName = "TOKEN_RIGHT_BRACKET"; break;
            case TOKEN_LEFT_BRACE: tokenTypeName = "TOKEN_LEFT_BRACE"; break;
            case TOKEN_RIGHT_BRACE: tokenTypeName = "TOKEN_RIGHT_BRACE"; break;
            case TOKEN_COMMA: tokenTypeName = "TOKEN_COMMA"; break;
            case TOKEN_DOT: tokenTypeName = "TOKEN_DOT"; break;
            case TOKEN_SEMICOLON: tokenTypeName = "TOKEN_SEMICOLON"; break;
            case TOKEN_TILDE: tokenTypeName = "TOKEN_TILDE"; break;
                // 单字符或双字符 Token
            case TOKEN_PLUS: tokenTypeName = "TOKEN_PLUS"; break;
            case TOKEN_PLUS_PLUS: tokenTypeName = "TOKEN_PLUS_PLUS"; break;
            case TOKEN_PLUS_EQUAL: tokenTypeName = "TOKEN_PLUS_EQUAL"; break;
            case TOKEN_MINUS: tokenTypeName = "TOKEN_MINUS"; break;
            case TOKEN_MINUS_MINUS: tokenTypeName = "TOKEN_MINUS_MINUS"; break;
            case TOKEN_MINUS_EQUAL: tokenTypeName = "TOKEN_MINUS_EQUAL"; break;
            case TOKEN_MINUS_GREATER: tokenTypeName = "TOKEN_MINUS_GREATER"; break;
            case TOKEN_STAR: tokenTypeName = "TOKEN_STAR"; break;
            case TOKEN_STAR_EQUAL: tokenTypeName = "TOKEN_STAR_EQUAL"; break;
            case TOKEN_SLASH: tokenTypeName = "TOKEN_SLASH"; break;
            case TOKEN_SLASH_EQUAL: tokenTypeName = "TOKEN_SLASH_EQUAL"; break;
            case TOKEN_PERCENT: tokenTypeName = "TOKEN_PERCENT"; break;
            case TOKEN_PERCENT_EQUAL: tokenTypeName = "TOKEN_PERCENT_EQUAL"; break;
            case TOKEN_AMPER: tokenTypeName = "TOKEN_AMPER"; break;
            case TOKEN_AMPER_EQUAL: tokenTypeName = "TOKEN_AMPER_EQUAL"; break;
            case TOKEN_AMPER_AMPER: tokenTypeName = "TOKEN_AMPER_AMPER"; break;
            case TOKEN_PIPE: tokenTypeName = "TOKEN_PIPE"; break;
            case TOKEN_PIPE_EQUAL: tokenTypeName = "TOKEN_PIPE_EQUAL"; break;
            case TOKEN_PIPE_PIPE: tokenTypeName = "TOKEN_PIPE_PIPE"; break;
            case TOKEN_HAT: tokenTypeName = "TOKEN_HAT"; break;
            case TOKEN_HAT_EQUAL: tokenTypeName = "TOKEN_HAT_EQUAL"; break;
            case TOKEN_EQUAL: tokenTypeName = "TOKEN_EQUAL"; break;
            case TOKEN_EQUAL_EQUAL: tokenTypeName = "TOKEN_EQUAL_EQUAL"; break;
            case TOKEN_BANG: tokenTypeName = "TOKEN_BANG"; break;
            case TOKEN_BANG_EQUAL: tokenTypeName = "TOKEN_BANG_EQUAL"; break;
            case TOKEN_LESS: tokenTypeName = "TOKEN_LESS"; break;
            case TOKEN_LESS_EQUAL: tokenTypeName = "TOKEN_LESS_EQUAL"; break;
            case TOKEN_LESS_LESS: tokenTypeName = "TOKEN_LESS_LESS"; break;
            case TOKEN_GREATER: tokenTypeName = "TOKEN_GREATER"; break;
            case TOKEN_GREATER_EQUAL: tokenTypeName = "TOKEN_GREATER_EQUAL"; break;
            case TOKEN_GREATER_GREATER: tokenTypeName = "TOKEN_GREATER_GREATER"; break;
                // 字面值
            case TOKEN_IDENTIFIER: tokenTypeName = "TOKEN_IDENTIFIER"; break;
            case TOKEN_CHARACTER: tokenTypeName = "TOKEN_CHARACTER"; break;
            case TOKEN_STRING: tokenTypeName = "TOKEN_STRING"; break;
            case TOKEN_NUMBER: tokenTypeName = "TOKEN_NUMBER"; break;
                // 关键字
            case TOKEN_SIGNED: tokenTypeName = "TOKEN_SIGNED"; break;
            case TOKEN_UNSIGNED: tokenTypeName = "TOKEN_UNSIGNED"; break;
            case TOKEN_CHAR: tokenTypeName = "TOKEN_CHAR"; break;
            case TOKEN_SHORT: tokenTypeName = "TOKEN_SHORT"; break;
            case TOKEN_INT: tokenTypeName = "TOKEN_INT"; break;
            case TOKEN_LONG: tokenTypeName = "TOKEN_LONG"; break;
            case TOKEN_FLOAT: tokenTypeName = "TOKEN_FLOAT"; break;
            case TOKEN_DOUBLE: tokenTypeName = "TOKEN_DOUBLE"; break;
            case TOKEN_STRUCT: tokenTypeName = "TOKEN_STRUCT"; break;
            case TOKEN_UNION: tokenTypeName = "TOKEN_UNION"; break;
            case TOKEN_ENUM: tokenTypeName = "TOKEN_ENUM"; break;
            case TOKEN_VOID: tokenTypeName = "TOKEN_VOID"; break;
            case TOKEN_IF: tokenTypeName = "TOKEN_IF"; break;
            case TOKEN_ELSE: tokenTypeName = "TOKEN_ELSE"; break;
            case TOKEN_SWITCH: tokenTypeName = "TOKEN_SWITCH"; break;
            case TOKEN_CASE: tokenTypeName = "TOKEN_CASE"; break;
            case TOKEN_DEFAULT: tokenTypeName = "TOKEN_DEFAULT"; break;
            case TOKEN_WHILE: tokenTypeName = "TOKEN_WHILE"; break;
            case TOKEN_DO: tokenTypeName = "TOKEN_DO"; break;
            case TOKEN_FOR: tokenTypeName = "TOKEN_FOR"; break;
            case TOKEN_BREAK: tokenTypeName = "TOKEN_BREAK"; break;
            case TOKEN_CONTINUE: tokenTypeName = "TOKEN_CONTINUE"; break;
            case TOKEN_RETURN: tokenTypeName = "TOKEN_RETURN"; break;
            case TOKEN_GOTO: tokenTypeName = "TOKEN_GOTO"; break;
            case TOKEN_CONST: tokenTypeName = "TOKEN_CONST"; break;
            case TOKEN_SIZEOF: tokenTypeName = "TOKEN_SIZEOF"; break;
            case TOKEN_TYPEDEF: tokenTypeName = "TOKEN_TYPEDEF"; break;
                // 辅助 Token
            case TOKEN_ERROR: tokenTypeName = "TOKEN_ERROR"; break;
            case TOKEN_EOF: tokenTypeName = "TOKEN_EOF"; break;
            default: tokenTypeName = "UNKNOWN"; break;
        }

        printf("{%-20s  %-25.*s  \tLine: %-5d}\n",
               tokenTypeName, token.length, token.start, token.line);
    } while (token.type != TOKEN_EOF);

    // 释放内存
    free(source);
}

#include "parser.h"
#include "ast_printer.h"
#include <stdio.h>

// Writes str to stdout with control and quoting characters rendered as
// C escape sequences (\n, \t, \r, \\, \") so the source text prints on
// a single visual line.
void printEscapedString(const char* str) {
    for (const char* p = str; *p != '\0'; ++p) {
        const char* escape = NULL;
        switch (*p) {
            case '\n': escape = "\\n"; break;
            case '\t': escape = "\\t"; break;
            case '\r': escape = "\\r"; break;
            case '\\': escape = "\\\\"; break;
            case '\"': escape = "\\\""; break;
        }
        if (escape != NULL) {
            fputs(escape, stdout);
        } else {
            putchar(*p);
        }
    }
}
// Parses `source` and prints the test banner, the escaped source text,
// and the resulting AST. Prints an error and returns if parsing fails.
// The caller retains ownership of `source`; the AST is freed here.
void testParser(const char* name, const char* source) {
    printf("===== TEST: %s =====\n", name);
    printf("Source:\n\"");
    printEscapedString(source);
    printf("\"\n");

    // Reset lexer global state so consecutive tests don't interfere
    resetLexer();

    ASTNode* ast = parse(source);
    if (!ast) {
        printf("Error: Parsing failed\n");
        return;
    }

    printf("\nAST:\n");
    printAST(ast, 0);
    freeASTNode(ast);
    printf("\n");
}

// File-based parser test driver (originally added in test.c)
// Loads the file at filePath into memory and runs testParser on it.
// On open/seek/allocation failure, reports the error and returns
// without running the test. The buffer is owned and freed here.
void testParserFromFile(const char* name, const char* filePath) {
    // Open the input file
    FILE* file = fopen(filePath, "r");
    if (file == NULL) {
        perror("Failed to open file");
        return;
    }

    // Determine the file size; ftell returns -1L on failure, which
    // previously went unchecked and would have produced a bogus
    // allocation size below.
    if (fseek(file, 0, SEEK_END) != 0) {
        perror("Failed to seek in file");
        fclose(file);
        return;
    }
    long fileSize = ftell(file);
    if (fileSize < 0) {
        perror("Failed to determine file size");
        fclose(file);
        return;
    }
    fseek(file, 0, SEEK_SET);

    // Allocate a buffer for the file contents plus a NUL terminator
    // (no cast on malloc — idiomatic C).
    char* source = malloc((size_t)fileSize + 1);
    if (source == NULL) {
        perror("Failed to allocate memory");
        fclose(file);
        return;
    }

    // Read the whole file; fread may return fewer bytes than requested
    // in text mode, so terminate at bytesRead.
    size_t bytesRead = fread(source, 1, (size_t)fileSize, file);
    source[bytesRead] = '\0';
    fclose(file);

    // Reset lexer global state (testParser resets again; harmless)
    resetLexer();

    // Delegate to the in-memory parser test
    testParser(name, source);

    // Release the source buffer
    free(source);
}


#include <stdio.h>
#include <stdlib.h>
#include "parser.h"
#include "semantic_analyzer.h"
#include "symbol_table.h"

// Semantic analysis test driver for a source file
// Loads the file at filePath, parses it, and runs semantic analysis on
// the resulting AST, printing pass/fail. On open/seek/allocation or
// parse failure, reports the error and returns. All resources
// (source buffer, AST, symbol table) are owned and freed here.
void testSemanticAnalysisFromFile(const char* filePath) {
    // Open the input file
    FILE* file = fopen(filePath, "r");
    if (file == NULL) {
        perror("Failed to open file");
        return;
    }

    // Determine the file size; ftell returns -1L on failure, which
    // previously went unchecked and would have produced a bogus
    // allocation size below.
    if (fseek(file, 0, SEEK_END) != 0) {
        perror("Failed to seek in file");
        fclose(file);
        return;
    }
    long fileSize = ftell(file);
    if (fileSize < 0) {
        perror("Failed to determine file size");
        fclose(file);
        return;
    }
    fseek(file, 0, SEEK_SET);

    // Allocate a buffer for the file contents plus a NUL terminator
    // (no cast on malloc — idiomatic C).
    char* source = malloc((size_t)fileSize + 1);
    if (source == NULL) {
        perror("Failed to allocate memory");
        fclose(file);
        return;
    }

    // Read the whole file; fread may return fewer bytes than requested
    // in text mode, so terminate at bytesRead.
    size_t bytesRead = fread(source, 1, (size_t)fileSize, file);
    source[bytesRead] = '\0';
    fclose(file);

    printf("===== TEST: %s =====\n", filePath);
    printf("Source:\n\"");
    // Print the escaped source, consistent with testParser (raw printf
    // would break the quote framing on sources containing '"' or '\n').
    printEscapedString(source);
    printf("\"\n");

    // Parse the source into an AST
    ASTNode* ast = parse(source);
    if (ast == NULL) {
        printf("Error: Parsing failed\n");
        free(source);
        return;
    }

    // Initialize the (global-scope) symbol table
    SymbolTable* table = initSymbolTable(NULL);

    // Run semantic analysis and report the result
    int result = semanticAnalysis(ast, table);
    if (result) {
        printf("Semantic analysis passed.\n");
    } else {
        printf("Semantic analysis failed.\n");
    }

    // Release all resources
    freeASTNode(ast);
    destroySymbolTable(table);
    free(source);
    printf("\n");
}
#endif //COMPILER_CCV2_TEST_H
