#include "add.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "stdio.h"

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Uses a grid-stride loop, so it is correct for ANY grid/block configuration
// (including <<<1, 1>>> for debugging) and for n larger than the launch size.
// a and b are read-only; marking them const __restrict__ lets the compiler
// use the read-only data cache and reorder loads.
__global__ void addKernel(const float* __restrict__ a,
                          const float* __restrict__ b,
                          float* __restrict__ c,
                          int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;  // total threads in the grid
    for (int i = tid; i < n; i += stride) {
        c[i] = a[i] + b[i];
    }
}

// Launch addKernel on the default stream to compute c[i] = a[i] + b[i]
// for n elements. All three pointers must be device pointers.
//
// Fix: the grid was hard-coded to a single block, so the whole array was
// processed by 256 threads regardless of n (correct only thanks to the
// kernel's grid-stride loop, but serialized). Size the grid to cover n
// with one thread per element, capped so very large n cannot exceed
// launch limits — the grid-stride loop picks up the remainder.
void launch_add(float *a, float *b, float *c, int n) {
    if (n <= 0) {
        return;  // nothing to do; also avoids a zero-size launch
    }
    const int blockSize = 256;
    int gridSize = (n + blockSize - 1) / blockSize;  // ceil-div coverage
    const int maxGrid = 65535;  // conservative cap; stride loop covers the rest
    if (gridSize > maxGrid) {
        gridSize = maxGrid;
    }
    addKernel<<<gridSize, blockSize>>>(a, b, c, n);
    // Kernel launches do not return an error code; configuration errors
    // are only visible via cudaGetLastError() after the launch.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n",
                cudaGetErrorString(err));
    }
}

// PyTorch-facing wrapper: element-wise add of the first n elements of a and
// b into c. The tensors are expected to be float32 CUDA tensors with at
// least n elements; contiguity is assumed — NOTE(review): confirm callers
// pass contiguous tensors, since raw data_ptr indexing ignores strides.
//
// Fix: the previous (float*) C-style cast reinterpreted the buffer with no
// checking; data_ptr<float>() returns the same pointer for float32 tensors
// but raises a runtime error if the dtype does not match.
void torch_launch_add(torch::Tensor& a,
                      torch::Tensor& b,
                      torch::Tensor& c,
                      int n)
{
    launch_add(a.data_ptr<float>(),
               b.data_ptr<float>(),
               c.data_ptr<float>(),
               n);
}

// int main() {
//     int n = 1024 * 1024;
//     float *a = (float *)malloc(n * sizeof(float));
//     float *b = (float *)malloc(n * sizeof(float));
//     float *c = (float *)malloc(n * sizeof(float));
//     for (int i = 0; i < n; ++i) {
//         a[i] = 1.0;
//         b[i] = 2.0;
//         c[i] = 0.0;
//     }
//     float *d_a, *d_b, *d_c;
//     cudaMalloc((void**)&d_a, n * sizeof(float));
//     cudaMalloc((void**)&d_b, n * sizeof(float));
//     cudaMalloc((void**)&d_c, n * sizeof(float));
//     cudaMemcpy(d_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
//     cudaMemcpy(d_b, b, n * sizeof(float), cudaMemcpyHostToDevice);
//     cudaMemcpy(d_c, c, n * sizeof(float), cudaMemcpyHostToDevice);
//     launch_add(d_a, d_b, d_c, n);
//     cudaMemcpy(c, d_c, n * sizeof(float), cudaMemcpyDeviceToHost);
//     printf("c[0] = %f\n", c[0]);
// }