// NOLINTBEGIN
#include <cuda_runtime.h>

#include <cstdlib>
#include <iostream>

#include "scheduler.hpp"

// Element-wise vector addition kernel: C[i] = A[i] + B[i] for i in [0, N).
// Expects a 1D launch with at least N total threads; surplus threads in the
// tail block exit via the bounds guard.
__global__ void vectorAdd(const float* A, const float* B, float* C, int N) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat global index
  if (idx >= N) {
    return;  // grid size rarely divides N exactly — guard the tail
  }
  C[idx] = A[idx] + B[idx];
}

// Report a CUDA runtime error to stderr, if any.
// Returns true when `err` is cudaSuccess, false otherwise.
static bool checkCuda(cudaError_t err, const char* what) {
  if (err != cudaSuccess) {
    std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
    return false;
  }
  return true;
}

// Runs a 1M-element vector addition on the GPU and verifies the result on
// the host.
// Returns 0 on success; 1 on any allocation, CUDA, or verification failure.
int func() {
  const int N = 1 << 20;  // vector length (1M elements)
  const size_t size = static_cast<size_t>(N) * sizeof(float);

  // Host allocations — must be checked before initialization writes.
  float* h_A = static_cast<float*>(malloc(size));
  float* h_B = static_cast<float*>(malloc(size));
  float* h_C = static_cast<float*>(malloc(size));
  if (h_A == nullptr || h_B == nullptr || h_C == nullptr) {
    std::cerr << "Host allocation failed" << std::endl;
    free(h_A);  // free(nullptr) is a no-op, so unconditional frees are safe
    free(h_B);
    free(h_C);
    return 1;
  }

  // Initialize input data.
  for (int i = 0; i < N; ++i) {
    h_A[i] = static_cast<float>(i);
    h_B[i] = static_cast<float>(i);
  }

  // Device allocations; every CUDA call is checked — a missed error here
  // makes every later call fail mysteriously.
  float* d_A = nullptr;
  float* d_B = nullptr;
  float* d_C = nullptr;
  bool ok = checkCuda(cudaMalloc(reinterpret_cast<void**>(&d_A), size),
                      "cudaMalloc d_A") &&
            checkCuda(cudaMalloc(reinterpret_cast<void**>(&d_B), size),
                      "cudaMalloc d_B") &&
            checkCuda(cudaMalloc(reinterpret_cast<void**>(&d_C), size),
                      "cudaMalloc d_C");

  // Copy the inputs to the device.
  if (ok) {
    ok = checkCuda(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice),
                   "cudaMemcpy h_A -> d_A") &&
         checkCuda(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice),
                   "cudaMemcpy h_B -> d_B");
  }

  // Launch the kernel. Launch-configuration errors do not surface from the
  // <<<>>> expression itself, so query cudaGetLastError() right after.
  if (ok) {
    const int threadsPerBlock = 256;
    const int blocksPerGrid =
        (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div covers tail
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    ok = checkCuda(cudaGetLastError(), "vectorAdd launch");
  }

  // Copy the result back. This blocking cudaMemcpy also synchronizes with
  // the kernel, so any asynchronous execution error surfaces here too.
  if (ok) {
    ok = checkCuda(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost),
                   "cudaMemcpy d_C -> h_C");
  }

  int status = ok ? 0 : 1;

  // Verify on the host. Exact float comparison is valid here: each operand
  // is an integer below 2^20, so i + i is exactly representable in float.
  if (ok) {
    for (int i = 0; i < N; ++i) {
      if (h_C[i] != h_A[i] + h_B[i]) {
        std::cerr << "Error at index " << i << ": " << h_C[i]
                  << " != " << (h_A[i] + h_B[i]) << std::endl;
        status = 1;  // previously the mismatch was reported but not returned
        break;
      }
    }
  }

  if (status == 0) {
    std::cout << "Vector addition completed successfully!" << std::endl;
  }

  // Release device memory (cudaFree(nullptr) is a no-op).
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);

  // Release host memory.
  free(h_A);
  free(h_B);
  free(h_C);

  return status;
}

// Entry point: hand the vector-add job to the scheduler, wait for it to
// finish, and print the status code it returned.
int main() {
  OldScheduler scheduler(0, 1);

  // Submit func to worker 0 and block until its result is ready.
  auto result = scheduler.submit_task(0, func);
  const auto ret = result.get();

  std::cout << "Ret: " << ret << "\n";
  return 0;
}

// NOLINTEND
