#include "CuTestUtils.cuh"
#include <cuda_runtime.h>
#include <iostream>
#include <string>
#include <vector>
#include <fmt/format.h>

// Custom struct: a simple 3-component vector usable on both host and device.
template <typename T>
struct Vector3 {
    T x, y, z;

    // Constructors: zero vector by default, or explicit components.
    __host__ __device__ Vector3() : x(0), y(0), z(0) {}
    __host__ __device__ Vector3(T x, T y, T z) : x(x), y(y), z(z) {}

    // Component-wise addition.
    __host__ __device__ Vector3<T> operator+(const Vector3<T>& rhs) const {
        return Vector3<T>(x + rhs.x, y + rhs.y, z + rhs.z);
    }

    // Uniform scaling by a scalar.
    __host__ __device__ Vector3<T> operator*(T s) const {
        return Vector3<T>(x * s, y * s, z * s);
    }

    // Dot (inner) product.
    __host__ __device__ T dot(const Vector3<T>& rhs) const {
        T acc = x * rhs.x;
        acc += y * rhs.y;
        acc += z * rhs.z;
        return acc;
    }

    // Stream output in the form "Vector3(x, y, z)".
    friend std::ostream& operator<<(std::ostream& os, const Vector3<T>& v) {
        return os << "Vector3(" << v.x << ", " << v.y << ", " << v.z << ")";
    }
};

// Specialize fmt::formatter for Vector3<T> so it works with fmt formatting.
// parse() is inherited from fmt::formatter<T>, so any format spec valid for T
// (e.g. "{:.2f}") is applied to each of the x/y/z components.
template <typename T>
struct fmt::formatter<Vector3<T>> : fmt::formatter<T> {
    // Base formatter for the component type (provides parse() and format()).
    using base = fmt::formatter<T>;

    // Formats v as "Vector3(x, y, z)", applying the parsed spec per component.
    auto format(const Vector3<T>& v, fmt::format_context& ctx) const -> fmt::format_context::iterator {
        auto out = ctx.out();

        // Opening of the vector representation.
        out = fmt::format_to(out, "Vector3(");

        // base::format writes through the context's iterator, so the context
        // must be advanced to our current output position before each call;
        // otherwise text already written via `out` can be lost for
        // non-appending iterator types.
        ctx.advance_to(out);
        out = base::format(v.x, ctx);
        out = fmt::format_to(out, ", ");
        ctx.advance_to(out);
        out = base::format(v.y, ctx);
        out = fmt::format_to(out, ", ");
        ctx.advance_to(out);
        out = base::format(v.z, ctx);

        // Closing parenthesis.
        return fmt::format_to(out, ")");
    }
};

// Custom class: a 4x4 float matrix with row-major storage.
class Matrix4x4 {
private:
    float data[16]; // row-major: element (r, c) lives at data[r * 4 + c]

public:
    // Default constructor: initializes to the identity matrix.
    __host__ __device__ Matrix4x4() {
        // Diagonal entries sit at flat indices 0, 5, 10, 15 (multiples of 5).
        for (int i = 0; i < 16; ++i) {
            data[i] = (i % 5 == 0) ? 1.0f : 0.0f;
        }
    }

    // Element access by (row, col); mutable and const overloads.
    __host__ __device__ float& at(int row, int col) {
        return data[row * 4 + col];
    }

    __host__ __device__ const float& at(int row, int col) const {
        return data[row * 4 + col];
    }

    // Matrix multiplication: returns (*this) * rhs.
    __host__ __device__ Matrix4x4 operator*(const Matrix4x4& rhs) const {
        Matrix4x4 product;
        for (int row = 0; row < 4; ++row) {
            for (int col = 0; col < 4; ++col) {
                // Accumulate the dot product of our row with rhs's column.
                float acc = 0.0f;
                for (int k = 0; k < 4; ++k) {
                    acc += at(row, k) * rhs.at(k, col);
                }
                product.at(row, col) = acc;
            }
        }
        return product;
    }

    // Stream output: one "[ a, b, c, d ]" line per row.
    friend std::ostream& operator<<(std::ostream& os, const Matrix4x4& m) {
        for (int row = 0; row < 4; ++row) {
            os << "[ ";
            for (int col = 0; col < 4; ++col) {
                if (col > 0) os << ", ";
                os << m.at(row, col);
            }
            os << " ]" << std::endl;
        }
        return os;
    }
};

// Custom data structure: a node of a singly-linked list.
template <typename T>
struct ListNode {
    T data;                   // payload stored in this node
    ListNode* next = nullptr; // successor node; null at the tail

    ListNode(T val) : data(val) {}
};

// CUDA kernel: element-wise Vector3 addition, c[i] = a[i] + b[i] for i in [0, size).
// Launch with any 1-D grid/block configuration: the grid-stride loop covers all
// `size` elements even when the grid is smaller than the data, and the loop
// condition guards against out-of-bounds access at the tail.
template <typename T>
__global__ void vectorAddKernel(const Vector3<T>* a, const Vector3<T>* b, Vector3<T>* c, int size) {
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}

// Test class exercising the custom data types above, on the host and the GPU.
class CustomTypesTest : public CudaTest {
public:
    std::string name() const override {
        return "Custom Types Test";
    }

    void run() override {
        // Host-side Vector3 arithmetic and fmt formatting.
        testVector3();

        // Host-side Matrix4x4 arithmetic and stream output.
        testMatrix4x4();

        // Vector3 addition executed on the device.
        testCustomTypesOnCUDA();
    }

private:
    // Exercises Vector3 arithmetic (+, *, dot) and its fmt formatter.
    void testVector3() {
        MYINFO("Testing Vector3 type...");

        Vector3<float> v1(1.0f, 2.0f, 3.0f);
        Vector3<float> v2(4.0f, 5.0f, 6.0f);

        Vector3<float> sum = v1 + v2;
        Vector3<float> scaled = v1 * 2.0f;
        float dotProduct = v1.dot(v2);

        // Format Vector3 values through the fmt::formatter specialization.
        MYINFO("v1 = {}", v1);
        MYINFO("v2 = {}", v2);
        MYINFO("v1 + v2 = {}", sum);
        MYINFO("v1 * 2 = {}", scaled);
        MYINFO("v1 · v2 = {}", dotProduct);

        // Format specs are forwarded per component via the inherited parse().
        MYINFO("v1 with 2 decimal places: {:.2f}", v1);
    }

    // Exercises Matrix4x4 element access, multiplication, and printing.
    void testMatrix4x4() {
        MYINFO("Testing Matrix4x4 type...");

        Matrix4x4 mat1;
        mat1.at(0, 0) = 2.0f;
        mat1.at(1, 1) = 3.0f;
        mat1.at(2, 2) = 4.0f;

        Matrix4x4 mat2;
        mat2.at(0, 0) = 5.0f;
        mat2.at(1, 1) = 6.0f;
        mat2.at(2, 2) = 7.0f;

        Matrix4x4 product = mat1 * mat2;

        MYINFO("Matrix 1:");
        std::cout << mat1 << std::endl;
        MYINFO("Matrix 2:");
        std::cout << mat2 << std::endl;
        MYINFO("Matrix 1 * Matrix 2:");
        std::cout << product << std::endl;
    }

    // Runs vectorAddKernel on the device and validates the result on the host.
    void testCustomTypesOnCUDA() {
        MYINFO("Testing custom types on CUDA...");

        const int size = 1024;
        const int blockSize = 256;
        const int gridSize = (size + blockSize - 1) / blockSize; // ceil-div

        // Host buffers; std::vector avoids manual new[]/delete[] and cannot leak.
        std::vector<Vector3<float>> h_a(size);
        std::vector<Vector3<float>> h_b(size);
        std::vector<Vector3<float>> h_c(size);

        // Deterministic input data.
        for (int i = 0; i < size; ++i) {
            h_a[i] = Vector3<float>(i * 1.0f, i * 2.0f, i * 3.0f);
            h_b[i] = Vector3<float>(i * 4.0f, i * 5.0f, i * 6.0f);
        }

        // Device buffers.
        Vector3<float>* d_a, *d_b, *d_c;
        MYCHECK(cudaMalloc(&d_a, size * sizeof(Vector3<float>)));
        MYCHECK(cudaMalloc(&d_b, size * sizeof(Vector3<float>)));
        MYCHECK(cudaMalloc(&d_c, size * sizeof(Vector3<float>)));

        // Copy inputs to the device.
        MYCHECK(cudaMemcpy(d_a, h_a.data(), size * sizeof(Vector3<float>), cudaMemcpyHostToDevice));
        MYCHECK(cudaMemcpy(d_b, h_b.data(), size * sizeof(Vector3<float>), cudaMemcpyHostToDevice));

        // Launch the kernel. cudaGetLastError catches launch-configuration
        // errors immediately; the synchronize surfaces asynchronous execution
        // errors from inside the kernel.
        vectorAddKernel<float><<<gridSize, blockSize>>>(d_a, d_b, d_c, size);
        MYCHECK(cudaGetLastError());
        MYCHECK(cudaDeviceSynchronize());

        // Copy the result back to the host.
        MYCHECK(cudaMemcpy(h_c.data(), d_c, size * sizeof(Vector3<float>), cudaMemcpyDeviceToHost));

        // Validate every element against a host-side reference computation.
        // Exact comparison is valid: host and device perform the same IEEE-754
        // single-precision additions on identical inputs.
        bool success = true;
        for (int i = 0; i < size; ++i) {
            Vector3<float> expected = h_a[i] + h_b[i];
            if (h_c[i].x != expected.x || h_c[i].y != expected.y || h_c[i].z != expected.z) {
                success = false;
                break;
            }
        }

        if (success) {
            MYINFO("CUDA vector addition test passed!");
            MYINFO("Sample result: h_a[0] + h_b[0] = {}", h_c[0]);
        } else {
            MYERROR("CUDA vector addition test failed!");
        }

        // Release device memory; host vectors clean up automatically.
        MYCHECK(cudaFree(d_a));
        MYCHECK(cudaFree(d_b));
        MYCHECK(cudaFree(d_c));
    }
};

// Register the test with the framework
REGISTER_TEST(CustomTypesTest);