#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>

#include "ndarray.cuh"

#define DATA_SIZE 128
// Debug kernel: each thread wraps its private DATA_SIZE-float slice of
// input_data in an NdArray view, reshapes it, and prints both layouts.
//
// Expected launch: 1-D grid and block. input_data must hold N floats; threads
// whose slice would run past N return early, so the grid may over-cover the
// buffer safely.
// NOTE(review): assumes NdArray::ToString() is device-callable and its
// .c_str() is valid for device printf — confirm against ndarray.cuh.
__global__ void dummy_kernel(float *input_data, int N) {
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;

    // Tail guard: previously N was accepted but never checked, so threads
    // beyond N / DATA_SIZE slices would read out of bounds.
    if ((thread_id + 1) * DATA_SIZE > N) return;

    // Same 128 contiguous elements viewed as 4x4x8, then reshaped to 16x8.
    auto m1 = NdArray::from_data_and_shape(input_data + thread_id * DATA_SIZE, {4, 4, 8});
    auto m2 = m1.reshape({16, 8});
    // Device printf is serialized and slow — debugging output only.
    printf("Block %d Thread %d:\n m1 %s\n", blockIdx.x, threadIdx.x, m1.ToString().c_str());
    printf("Block %d Thread %d:\n m2 %s\n", blockIdx.x, threadIdx.x, m2.ToString().c_str());
}

#define BLOCK_SIZE 16

// Ceiling division: the smallest k such that k * b >= a.
// Precondition: b != 0 (otherwise division by zero, as in any integer division).
auto divup(size_t a, size_t b) -> size_t {
    const size_t quotient = a / b;
    return (a % b == 0) ? quotient : quotient + 1;
}

// Host driver: fills a ramp buffer, uploads it, and launches dummy_kernel so
// every thread prints its NdArray views. Exits non-zero on any CUDA failure.
int main() {
    // grid_size blocks of BLOCK_SIZE threads; each thread consumes a
    // DATA_SIZE-element slice, so the buffer holds exactly one slice per thread.
    const size_t grid_size = 32;
    const size_t N = BLOCK_SIZE * grid_size * DATA_SIZE;

    // Heap-backed host buffer: N floats (256 KiB here) is too large to place
    // comfortably on the stack as a plain array.
    std::vector<float> h_A(N);

    // Simple ramp test data: each element's value encodes its index.
    for (size_t i = 0; i < N; ++i) {
        h_A[i] = static_cast<float>(i);
    }

    // Local error-check helper: report the failing call and bail out, instead
    // of silently continuing (the original ignored all API return codes).
    auto check = [](cudaError_t err, const char *what) {
        if (err != cudaSuccess) {
            std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
            std::exit(EXIT_FAILURE);
        }
    };

    // Device allocation and upload.
    float *d_A = nullptr;
    check(cudaMalloc(&d_A, N * sizeof(float)), "cudaMalloc");
    check(cudaMemcpy(d_A, h_A.data(), N * sizeof(float), cudaMemcpyHostToDevice),
          "cudaMemcpy H2D");

    // Launch configuration: grid_size blocks x BLOCK_SIZE threads (1-D).
    auto block_size = BLOCK_SIZE;
    dim3 grid(grid_size, 1, 1);
    dim3 block(block_size, 1, 1);

    RUN_KERNEL_WITH_MEMPOOL(dummy_kernel, grid, block, d_A, N);

    // Surface launch-configuration errors first, then asynchronous execution
    // errors at the synchronization point.
    check(cudaGetLastError(), "kernel launch");
    check(cudaDeviceSynchronize(), "kernel execution");

    printf("\n");

    // Cleanup.
    check(cudaFree(d_A), "cudaFree");
    return 0;
}