#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include <cmath>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include "CuMacro.h"
#include "CudaUtils.cuh"

//#include "../common/MyMacro.h"
// #include "../common/MyPrtFns.h"
#include "MyPrtFns.h"
#include "TestCudaBasic.h"

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx cuda information
// 定义测试命名空间和函数
namespace te_cuda_1
{
    // Prints basic properties of the default CUDA device (SM count,
    // shared-memory sizes, thread limits). Returns 0 on success.
    std::int8_t tt()
    {
        MY_ECHOFUNPATH;
        MYINFO("Running Device Info Test");

        const int deviceId = 0;
        cudaDeviceProp props;
        MYCHECK(cudaGetDeviceProperties(&props, deviceId));

        MYINFO("Using GPU device {0}: {1}", deviceId, props.name);
        MYINFO("Number of SMs: {0}", props.multiProcessorCount);
        MYINFO("Shared memory per thread block: {0:.2f} KB", props.sharedMemPerBlock / 1024.0);
        MYINFO("Max threads per thread block: {0}", props.maxThreadsPerBlock);
        MYINFO("Max threads per stream processor: {0}", props.maxThreadsPerMultiProcessor);
        MYINFO("Max warps per stream processor: {0}", props.maxThreadsPerMultiProcessor / 32);
        MYINFO("Max shared memory per stream processor: {0:.2f} KB", props.sharedMemPerMultiprocessor / 1024.0);
        MYINFO("Max registers per stream processor: {0}", props.regsPerMultiprocessor);
        MYINFO("Test passed: Device Info");

        return 0;
    }

    // Queries and reports how many CUDA-capable devices are visible.
    // Returns 0 on success.
    std::int8_t tt1()
    {
        MY_ECHOFUNPATH;
        MYINFO("Running Basic CUDA Test");

        int count = 0;
        MYCHECK(cudaGetDeviceCount(&count));
        MYINFO("Number of CUDA devices: {0}", count);

        MYINFO("Test passed: Basic CUDA");
        return 0;
    }

    // Allocates, zero-fills and releases a small device buffer to verify
    // basic device memory management. Returns 0 on success.
    std::int8_t tt2()
    {
        MY_ECHOFUNPATH;
        MYINFO("Running Memory Allocation Test");

        // Name the buffer size once instead of repeating the magic 1024.
        constexpr int kNumInts = 1024;
        const auto kBytes = sizeof(int) * kNumInts;

        int* d_data = nullptr;
        MYCHECK(cudaMalloc(&d_data, kBytes));
        // cudaMemset is byte-wise; a fill value of 0 is safe for int.
        MYCHECK(cudaMemset(d_data, 0, kBytes));
        MYCHECK(cudaFree(d_data));

        MYINFO("Test passed: Memory Allocation");
        return 0;
    }
}  // namespace te_cuda_1

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 自定义类型
namespace te_cuda_2
{
    //============================= 自定义结构体 - 简单向量类型
    //============================= Custom struct - simple 3-component vector
    // Usable from both host and device code.
    template <typename T>
    struct Vector3
    {
        T x, y, z;

        // Constructors: default is the zero vector.
        __host__ __device__ Vector3() : x(0), y(0), z(0) {}
        __host__ __device__ Vector3(T x, T y, T z) : x(x), y(y), z(z) {}

        // Component-wise addition.
        __host__ __device__ Vector3<T> operator+(const Vector3<T>& rhs) const
        {
            return Vector3<T>(x + rhs.x, y + rhs.y, z + rhs.z);
        }

        // Uniform scaling by s.
        __host__ __device__ Vector3<T> operator*(T s) const
        {
            return Vector3<T>(x * s, y * s, z * s);
        }

        // Dot (inner) product.
        __host__ __device__ T dot(const Vector3<T>& rhs) const
        {
            return x * rhs.x + y * rhs.y + z * rhs.z;
        }

        // Stream output, e.g. "Vector3(1, 2, 3)".
        friend std::ostream& operator<<(std::ostream& os, const Vector3<T>& v)
        {
            return os << "Vector3(" << v.x << ", " << v.y << ", " << v.z << ")";
        }
    };

}  // namespace te_cuda_2

//============================= 为 Vector3 特化 fmt::formatter 模板，使其支持 fmt 格式化
namespace fmt
{
    // Formatter for te_cuda_2::Vector3<T>. Inherits parse() from the
    // component formatter, so a spec like "{:.2f}" is applied to each of
    // x, y and z individually.
    template <typename T>
    struct formatter<te_cuda_2::Vector3<T>> : formatter<T>
    {
        using base = formatter<T>;

        // Writes "Vector3(x, y, z)" to the context's output iterator.
        auto format(const te_cuda_2::Vector3<T>& v, format_context& ctx) const -> format_context::iterator
        {
            auto out = format_to(ctx.out(), "Vector3(");

            // BUGFIX: the context must be advanced to the current output
            // position before each nested base::format() call; otherwise
            // the component is written at the stale iterator held by ctx
            // and the pieces of the output are interleaved incorrectly.
            ctx.advance_to(out);
            out = base::format(v.x, ctx);

            out = format_to(out, ", ");
            ctx.advance_to(out);
            out = base::format(v.y, ctx);

            out = format_to(out, ", ");
            ctx.advance_to(out);
            out = base::format(v.z, ctx);

            return format_to(out, ")");
        }
    };
}  // namespace fmt

namespace te_cuda_2
{

    //============================= 自定义结构体 - 简单向量类型 自定义类 - 矩阵类型
    //============================= Custom class - 4x4 float matrix
    // Row-major storage; usable from host and device code.
    class Matrix4x4
    {
    private:
        float data[16];  // row-major: element (r, c) lives at data[r * 4 + c]

    public:
        // Default-constructs the identity matrix.
        __host__ __device__ Matrix4x4()
        {
            for (int i = 0; i < 16; ++i) data[i] = 0.0f;
            data[0] = data[5] = data[10] = data[15] = 1.0f;
        }

        // Element access (no bounds checking).
        __host__ __device__ float& at(int row, int col) { return data[row * 4 + col]; }

        __host__ __device__ const float& at(int row, int col) const { return data[row * 4 + col]; }

        // Standard matrix product: result(r, c) = sum_k this(r, k) * other(k, c).
        __host__ __device__ Matrix4x4 operator*(const Matrix4x4& other) const
        {
            Matrix4x4 result;
            for (int r = 0; r < 4; ++r)
            {
                for (int c = 0; c < 4; ++c)
                {
                    // Accumulate in a local before storing the element.
                    float sum = 0.0f;
                    for (int k = 0; k < 4; ++k)
                    {
                        sum += at(r, k) * other.at(k, c);
                    }
                    result.at(r, c) = sum;
                }
            }
            return result;
        }

        //============================= Stream output: one "[ a, b, c, d ]" row per line.
        friend std::ostream& operator<<(std::ostream& os, const Matrix4x4& m)
        {
            for (int r = 0; r < 4; ++r)
            {
                os << "[ ";
                for (int c = 0; c < 4; ++c)
                {
                    os << m.at(r, c);
                    if (c < 3) os << ", ";
                }
                os << " ]" << std::endl;
            }
            return os;
        }
    };

    //============================= 自定义数据结构 - 链表节点
    //============================= Custom data structure - singly linked list node
    template <typename T>
    struct ListNode
    {
        T data;          // payload
        ListNode* next;  // successor node, or nullptr at the tail

        // Takes the value by value and moves it into place, so lvalue
        // arguments are copied once and rvalue arguments are moved.
        ListNode(T val) : data(std::move(val)), next(nullptr) {}
    };

    //============================= CUDA 内核 - 向量加法
    //============================= CUDA kernel - element-wise Vector3 addition
    // Computes c[idx] = a[idx] + b[idx] for idx in [0, size).
    // Inputs are const __restrict__ (read-only, no aliasing with c).
    // Grid-stride loop: correct for any 1D grid/block configuration.
    template <typename T>
    __global__ void vectorAddKernel(const Vector3<T>* __restrict__ a, const Vector3<T>* __restrict__ b, Vector3<T>* __restrict__ c, int size)
    {
        const int stride = blockDim.x * gridDim.x;
        for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += stride)
        {
            c[idx] = a[idx] + b[idx];
        }
    }

    // Exercises Vector3 arithmetic on the host and the fmt formatter
    // specialization (plain "{}" and a precision spec).
    void testVector3()
    {
        MY_ECHOFUNPATH;
        MYINFO("Testing Vector3 type...");

        const Vector3<float> v1(1.0f, 2.0f, 3.0f);
        const Vector3<float> v2(4.0f, 5.0f, 6.0f);

        const Vector3<float> sum = v1 + v2;
        const Vector3<float> scaled = v1 * 2.0f;
        const float dotProduct = v1.dot(v2);

        // Format Vector3 values through fmt.
        MYINFO("v1 = {}", v1);
        MYINFO("v2 = {}", v2);
        MYINFO("v1 + v2 = {}", sum);
        MYINFO("v1 * 2 = {}", scaled);
        MYINFO("v1 · v2 = {}", dotProduct);

        // The per-component format spec is forwarded to each of x/y/z.
        MYINFO("v1 with 2 decimal places: {:.2f}", v1);
    }

    // Builds two diagonal matrices, multiplies them, and prints all three
    // through the stream operator.
    void testMatrix4x4()
    {
        MY_ECHOFUNPATH;
        MYINFO("Testing Matrix4x4 type...");

        Matrix4x4 lhs;
        lhs.at(0, 0) = 2.0f;
        lhs.at(1, 1) = 3.0f;
        lhs.at(2, 2) = 4.0f;

        Matrix4x4 rhs;
        rhs.at(0, 0) = 5.0f;
        rhs.at(1, 1) = 6.0f;
        rhs.at(2, 2) = 7.0f;

        const Matrix4x4 product = lhs * rhs;

        MYINFO("Matrix 1:");
        ::std::cout << lhs << ::std::endl;
        MYINFO("Matrix 2:");
        ::std::cout << rhs << ::std::endl;
        MYINFO("Matrix 1 * Matrix 2:");
        ::std::cout << product << ::std::endl;
    }

    //=============================
    //============================= Round-trip custom types through a CUDA kernel
    // Adds two arrays of Vector3<float> on the device and spot-checks the
    // first few results against the host-side computation.
    void testCustomTypesOnCUDA()
    {
        MY_ECHOFUNPATH;
        MYINFO("Testing custom types on CUDA...");

        const int size = 1024;
        const int blockSize = 256;
        const int gridSize = (size + blockSize - 1) / blockSize;
        const auto bytes = size * sizeof(Vector3<float>);

        // Host buffers: std::vector instead of raw new[]/delete[], so the
        // memory is released on every exit path.
        std::vector<Vector3<float>> h_a(size), h_b(size), h_c(size);

        // Initialize the inputs.
        for (int i = 0; i < size; ++i)
        {
            h_a[i] = Vector3<float>(i * 1.0f, i * 2.0f, i * 3.0f);
            h_b[i] = Vector3<float>(i * 4.0f, i * 5.0f, i * 6.0f);
        }

        // Device buffers.
        Vector3<float>*d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
        MYCHECK(cudaMalloc(&d_a, bytes));
        MYCHECK(cudaMalloc(&d_b, bytes));
        MYCHECK(cudaMalloc(&d_c, bytes));

        // Host -> device.
        MYCHECK(cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice));
        MYCHECK(cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice));

        // Launch, then check for launch-configuration errors as well as
        // asynchronous execution errors.
        vectorAddKernel<float><<<gridSize, blockSize>>>(d_a, d_b, d_c, size);
        MYCHECK(cudaGetLastError());
        MYCHECK(cudaDeviceSynchronize());

        // Device -> host.
        MYCHECK(cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost));

        // Verify only the first 5 results; those values are small exact
        // float sums, so exact comparison is safe here.
        bool success = true;
        for (int i = 0; i < 5; ++i)
        {
            const Vector3<float> expected = h_a[i] + h_b[i];
            if (h_c[i].x != expected.x || h_c[i].y != expected.y || h_c[i].z != expected.z)
            {
                success = false;
                break;
            }
        }

        if (success)
        {
            MYINFO("CUDA vector addition test passed!");
            MYINFO("Sample result: h_a[0] + h_b[0] = {}", h_c[0]);
        }
        else
        {
            MYERROR("CUDA vector addition test failed!");
        }

        // Release device memory (host vectors clean up automatically).
        MYCHECK(cudaFree(d_a));
        MYCHECK(cudaFree(d_b));
        MYCHECK(cudaFree(d_c));
    }

    //============================= run these tests
    // Entry point for the custom-type tests. Logs its path on entry for
    // consistency with the tt() functions in the sibling namespaces.
    std::int8_t tt()
    {
        MY_ECHOFUNPATH;

        // Host-side Vector3 arithmetic and fmt formatting.
        testVector3();

        // Host-side Matrix4x4 product and stream output.
        testMatrix4x4();

        // Custom types round-tripped through a device kernel.
        testCustomTypesOnCUDA();

        return 0;
    }

}  // namespace te_cuda_2

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx cuda 向量加法
namespace te_cuda_3
{

    // 向量加法核函数，grid和block都是1D
    // Vector-addition kernel (1D grid, 1D blocks): z[i] = x[i] + y[i].
    // Inputs are const __restrict__ (read-only, no aliasing with z), which
    // lets the compiler use the read-only data cache.
    // Grid-stride loop, so any grid size covers all n elements.
    __global__ void add(const float* __restrict__ x, const float* __restrict__ y, float* __restrict__ z, int n)
    {
        // Global index of this thread.
        int index = threadIdx.x + blockIdx.x * blockDim.x;
        // Total number of threads in the grid.
        int stride = blockDim.x * gridDim.x;
        for (int i = index; i < n; i += stride)
        {
            z[i] = x[i] + y[i];
        }
    }

    //============================= 测试
    //============================= Test: vector addition on the device
    // Fills two N-element float arrays, adds them on the GPU, and reports
    // the maximum absolute error against the expected value 30.
    std::int8_t tt()
    {
        MY_ECHOFUNPATH;
        const int N = 1 << 20;
        const auto nBytes = N * sizeof(float);

        // Host buffers: std::vector releases them on every exit path.
        std::vector<float> x(N, 10.0f);
        std::vector<float> y(N, 20.0f);
        std::vector<float> z(N);

        // Device buffers (project helper).
        float *d_x, *d_y, *d_z;
        CudaMaloc<float>(N, d_x, d_y, d_z);

        // Host -> device, error-checked (consistent with te_cuda_1).
        MYCHECK(cudaMemcpy(d_x, x.data(), nBytes, cudaMemcpyHostToDevice));
        MYCHECK(cudaMemcpy(d_y, y.data(), nBytes, cudaMemcpyHostToDevice));

        // Launch configuration: ceil-divide N into 256-thread blocks.
        dim3 blockSize(256);
        dim3 gridSize((N + blockSize.x - 1) / blockSize.x);

        // Launch, then check for launch and execution errors.
        add<<<gridSize, blockSize>>>(d_x, d_y, d_z, N);
        MYCHECK(cudaGetLastError());
        MYCHECK(cudaDeviceSynchronize());

        // Device -> host.
        MYCHECK(cudaMemcpy(z.data(), d_z, nBytes, cudaMemcpyDeviceToHost));

        // Every element should be exactly 10 + 20 = 30; float literals and
        // float math avoid silent double promotion.
        float maxError = 0.0f;
        for (int i = 0; i < N; i++) maxError = fmaxf(maxError, fabsf(z[i] - 30.0f));
        MYINFO("Maximum error: {0}", maxError);

        // Release device memory.
        MYCHECK(cudaFree(d_x));
        MYCHECK(cudaFree(d_y));
        MYCHECK(cudaFree(d_z));

        return 0;
    }

};  // namespace te_cuda_3

//xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx test cuda 4
namespace te_cuda_4
{

    // Example kernel built from the project macros in CuMacro.h:
    // CUDA_GLBL presumably expands to "__global__ void" and
    // CUDA_KRNL_LOOP(i, n) to a loop over i in [0, n) distributed across
    // threads — TODO confirm against the macro definitions.
    // Computes c[i] = a[i] + b[i] element-wise.
    CUDA_GLBL addVectors(float* a, float* b, float* c, int n)
    {
        CUDA_KRNL_LOOP(i, n) { c[i] = a[i] + b[i]; }
    }

    // Drives the macro-based addVectors kernel: allocates device buffers,
    // uploads two initialized arrays, launches the kernel, and prints 10
    // evenly spaced results. Returns 0 on success.
    std::int8_t tt()
    {
        MY_ECHOFUNPATH;

        constexpr int cnt = 1000;
        float *a, *b, *c;

        // Device allocations via the project helper.
        CudaMaloc<float>(cnt, a, b, c);

        MYINFO("initial data");
        // Build the input arrays on the host: a[i] = i, b[i] = 2i.
        float* h_a = (float*)malloc(cnt * sizeof(float));
        float* h_b = (float*)malloc(cnt * sizeof(float));
        for (int i = 0; i < cnt; ++i)
        {
            h_a[i] = static_cast<float>(i);
            h_b[i] = static_cast<float>(i * 2);
        }

        // Upload the inputs, then release the host copies.
        CUDA_MCPYC(float, a, h_a, cnt, cudaMemcpyHostToDevice);
        CUDA_MCPYC(float, b, h_b, cnt, cudaMemcpyHostToDevice);
        free(h_a);
        free(h_b);

        // Device memory cannot be dereferenced from the host, so e.g.
        // printf("a[0] = %f\n", a[0]) would be invalid here.

        // Launch: one thread per element, ceil-divided into blocks.
        constexpr int threads = 256;
        constexpr int blocks = (cnt + threads - 1) / threads;
        CUDA_KRNL_LUNC(addVectors, blocks, threads, a, b, c, cnt);

        // Wait for the kernel to finish.
        CUDA_CALLC(cudaDeviceSynchronize());

        // Download the result so it can be printed.
        float* h_c = (float*)malloc(cnt * sizeof(float));
        CUDA_MCPYC(float, h_c, c, cnt, cudaMemcpyDeviceToHost);

        // Print 10 evenly spaced samples of c.
        printf("计算结果 c 的 10 个等间隔值：\n");
        const int step = cnt / 9;
        for (int i = 0; i < 10; i++)
        {
            int index = i * step;
            if (index >= cnt) index = cnt - 1;  // clamp to the last element
            printf("c[%d] = %f\n", index, h_c[index]);
        }

        // Release the host copy of the result.
        free(h_c);

        // Release device memory via the project helper.
        CudaFreeC(a, b, c);

        return 0;
    }
}  // namespace te_cuda_4
