#include <cuda_runtime.h>

#include <cmath>
#include <iostream>

// Print a rows x cols matrix (stored row-major in `matrix`) to stdout,
// preceded by a "Matrix <name>:" header line.
void printMatrix(float* matrix, int rows, int cols, const char* name) {
    std::cout << "Matrix " << name << ":" << std::endl;
    for (int r = 0; r < rows; ++r) {
        const float* row = matrix + r * cols;
        for (int c = 0; c < cols; ++c) {
            std::cout << row[c] << " ";
        }
        std::cout << std::endl;
    }
}

// NCHW -> NHWC layout transpose.
// Expected launch: 2D blocks tiling (w, h), gridDim.z == N. Each thread owns
// one (n, h, w) position and copies all C channel values for it, so C need
// not be covered by the launch geometry.
__global__ void transpose_nchw_to_nhwc(const float* __restrict__ input, float* __restrict__ output,
                                       int N, int C, int H, int W) {
    const int w = blockDim.x * blockIdx.x + threadIdx.x;
    const int h = blockDim.y * blockIdx.y + threadIdx.y;
    const int n = blockIdx.z;

    // Guard the grid tail: block counts are rounded up by the host launcher.
    if (w >= W || h >= H || n >= N) return;

    const int plane = H * W;      // size of one channel plane
    const int hw = h * W + w;     // offset within a plane
    for (int c = 0; c < C; ++c) {
        // src = n*C*H*W + c*H*W + h*W + w ; dst = n*H*W*C + h*W*C + w*C + c
        output[(n * plane + hw) * C + c] = input[(n * C + c) * plane + hw];
    }
}

// NHWC -> NCHW layout transpose (inverse of transpose_nchw_to_nhwc).
// Expected launch: 2D blocks tiling (w, h), gridDim.z == N. Each thread owns
// one (n, h, w) position and moves all C channel values back to planar order.
__global__ void transpose_nhwc_to_nchw(const float* __restrict__ input, float* __restrict__ output,
                                       int N, int C, int H, int W) {
    const int w = blockDim.x * blockIdx.x + threadIdx.x;
    const int h = blockDim.y * blockIdx.y + threadIdx.y;
    const int n = blockIdx.z;

    // Guard the grid tail: block counts are rounded up by the host launcher.
    if (w >= W || h >= H || n >= N) return;

    const int plane = H * W;      // size of one channel plane
    const int hw = h * W + w;     // offset within a plane
    for (int c = 0; c < C; ++c) {
        // src = n*H*W*C + h*W*C + w*C + c ; dst = n*C*H*W + c*H*W + h*W + w
        output[(n * C + c) * plane + hw] = input[(n * plane + hw) * C + c];
    }
}

// NOTE(review): an exact duplicate definition of printMatrix previously lived
// here. It redefined the identical function declared near the top of this
// file, which is a C++ redefinition error and prevented the file from
// compiling. The duplicate has been removed; the original definition above
// serves all callers.

// Round-trip test: NCHW -> NHWC -> NCHW must reproduce the input exactly
// (both kernels are pure copies, no arithmetic). Returns 0 on success,
// -1 on any CUDA error, 1 on a verification mismatch.
int main() {
    // Test tensor dimensions: one 3-channel 4x4 image.
    const int N = 1, C = 3, H = 4, W = 4;

    const size_t size_nchw = static_cast<size_t>(N) * C * H * W * sizeof(float);
    const size_t size_nhwc = static_cast<size_t>(N) * H * W * C * sizeof(float);

    float* h_input_nchw  = new float[N * C * H * W];
    float* h_output_nhwc = new float[N * H * W * C];
    float* h_output_nchw = new float[N * C * H * W];

    // Initialize input with distinct values so any misplaced element is caught.
    for (int i = 0; i < N * C * H * W; ++i) {
        h_input_nchw[i] = static_cast<float>(i);
    }

    printMatrix(h_input_nchw, C * H, W, "Input NCHW");

    // Initialize to nullptr so cleanup can unconditionally cudaFree
    // (cudaFree(nullptr) is a documented no-op).
    float* d_input_nchw  = nullptr;
    float* d_output_nhwc = nullptr;
    float* d_output_nchw = nullptr;

    // Release every host and device allocation; safe on all exit paths.
    auto cleanup = [&]() {
        delete[] h_input_nchw;
        delete[] h_output_nhwc;
        delete[] h_output_nchw;
        cudaFree(d_input_nchw);
        cudaFree(d_output_nhwc);
        cudaFree(d_output_nchw);
    };
    // Report a CUDA failure; returns true when `err` signals an error.
    auto failed = [](cudaError_t err, const char* what) {
        if (err == cudaSuccess) return false;
        std::cerr << what << ": " << cudaGetErrorString(err) << std::endl;
        return true;
    };

    // Allocate device buffers and upload the input, checking every call.
    if (failed(cudaMalloc(&d_input_nchw, size_nchw), "cudaMalloc d_input_nchw") ||
        failed(cudaMalloc(&d_output_nhwc, size_nhwc), "cudaMalloc d_output_nhwc") ||
        failed(cudaMalloc(&d_output_nchw, size_nchw), "cudaMalloc d_output_nchw") ||
        failed(cudaMemcpy(d_input_nchw, h_input_nchw, size_nchw, cudaMemcpyHostToDevice),
               "cudaMemcpy H2D")) {
        cleanup();
        return -1;
    }

    // One thread per (h, w) pixel, one grid slice per batch element;
    // ceil-division rounds up so the whole image is covered.
    dim3 blockDim(16, 16);
    dim3 gridDim((W + blockDim.x - 1) / blockDim.x,
                 (H + blockDim.y - 1) / blockDim.y,
                 N);

    // Forward transpose. cudaGetLastError() right after the launch catches
    // launch-configuration errors; the sync's return catches execution errors.
    transpose_nchw_to_nhwc<<<gridDim, blockDim>>>(d_input_nchw, d_output_nhwc, N, C, H, W);
    if (failed(cudaGetLastError(), "Kernel launch (NCHW -> NHWC)") ||
        failed(cudaDeviceSynchronize(), "Kernel exec (NCHW -> NHWC)")) {
        cleanup();
        return -1;
    }

    // Inverse transpose back to NCHW.
    transpose_nhwc_to_nchw<<<gridDim, blockDim>>>(d_output_nhwc, d_output_nchw, N, C, H, W);
    if (failed(cudaGetLastError(), "Kernel launch (NHWC -> NCHW)") ||
        failed(cudaDeviceSynchronize(), "Kernel exec (NHWC -> NCHW)")) {
        cleanup();
        return -1;
    }

    // Download the round-tripped result.
    if (failed(cudaMemcpy(h_output_nchw, d_output_nchw, size_nchw, cudaMemcpyDeviceToHost),
               "cudaMemcpy D2H")) {
        cleanup();
        return -1;
    }

    printMatrix(h_output_nchw, C * H, W, "Output NCHW after round trip");

    // Verify the round trip reproduced the input (float tolerance, though the
    // copies should be bit-exact).
    bool success = true;
    for (int i = 0; i < N * C * H * W; ++i) {
        if (std::fabs(h_input_nchw[i] - h_output_nchw[i]) > 1e-5f) {
            std::cout << "Mismatch at index " << i << ": " << h_input_nchw[i]
                      << " vs " << h_output_nchw[i] << std::endl;
            success = false;
            break;
        }
    }

    if (success) {
        std::cout << "Transpose and inverse transpose successful!" << std::endl;
    }

    cleanup();
    // Propagate verification failure through the process exit code.
    return success ? 0 : 1;
}