// NOLINTBEGIN

#include <cuda_runtime.h>

#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

// Element-wise sum of `num_vectors` equally sized vectors stored
// back-to-back in `input_vectors` (vector i occupies
// [i * vector_size, (i + 1) * vector_size)):
//   result[idx] = sum over i of input_vectors[i * vector_size + idx]
//
// Launch layout: 1-D grid of 1-D blocks. Uses a grid-stride loop, so the
// kernel is correct for ANY positive grid size (including a single block),
// not just grids that fully cover `vector_size`. Adjacent threads read
// adjacent addresses, so global-memory loads are coalesced.
__global__ void add_vectors_kernel(const size_t* __restrict__ input_vectors,
                                   size_t* __restrict__ result,
                                   size_t num_vectors, size_t vector_size) {
  // Widen to size_t before multiplying: blockIdx.x * blockDim.x is a
  // 32-bit product and could overflow for very large grids.
  const size_t stride = static_cast<size_t>(gridDim.x) * blockDim.x;
  for (size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
       idx < vector_size; idx += stride) {
    size_t sum = 0;
    for (size_t i = 0; i < num_vectors; ++i) {
      sum += input_vectors[i * vector_size + idx];
    }
    result[idx] = sum;
  }
}

// Sums `vectors` element-wise on the GPU and returns the result.
//
// All inner vectors must be non-null and share the same length; the result
// has that length, with result[j] = sum over i of (*vectors[i])[j].
// An empty outer vector or zero-length inner vectors yield an empty result.
//
// Throws std::invalid_argument for null/ragged input and std::runtime_error
// when a CUDA runtime call fails. Device buffers are held by RAII owners,
// so no device memory leaks on any exit path.
extern "C++" std::shared_ptr<std::vector<size_t>> add_vectors_impl(
    const std::vector<std::shared_ptr<std::vector<size_t>>>& vectors) {
  if (vectors.empty()) return std::make_shared<std::vector<size_t>>();

  const size_t num_vectors = vectors.size();

  // Reject null entries and ragged input up front: copying `vector_size`
  // elements from a shorter host vector below would read out of bounds.
  if (!vectors[0]) {
    throw std::invalid_argument("add_vectors_impl: null input vector");
  }
  const size_t vector_size = vectors[0]->size();
  for (size_t i = 0; i < num_vectors; ++i) {
    if (!vectors[i] || vectors[i]->size() != vector_size) {
      throw std::invalid_argument(
          "add_vectors_impl: input vectors must all have the same size");
    }
  }

  // A zero-block launch is an invalid configuration, so handle zero-length
  // vectors before touching the device.
  if (vector_size == 0) return std::make_shared<std::vector<size_t>>();

  // Every CUDA runtime call returns a status; never ignore it.
  auto check = [](cudaError_t err, const char* what) {
    if (err != cudaSuccess) {
      throw std::runtime_error(std::string("add_vectors_impl: ") + what +
                               ": " + cudaGetErrorString(err));
    }
  };

  // RAII owner for device allocations: freed automatically on throw/return.
  struct DeviceFree {
    void operator()(size_t* p) const { cudaFree(p); }
  };
  using DevicePtr = std::unique_ptr<size_t, DeviceFree>;

  size_t* raw = nullptr;
  check(cudaMalloc(&raw, num_vectors * vector_size * sizeof(size_t)),
        "cudaMalloc(input)");
  DevicePtr d_input_vectors(raw);
  raw = nullptr;
  check(cudaMalloc(&raw, vector_size * sizeof(size_t)), "cudaMalloc(result)");
  DevicePtr d_result(raw);

  // Upload the inputs back-to-back: vector i starts at offset i * vector_size.
  for (size_t i = 0; i < num_vectors; ++i) {
    check(cudaMemcpy(d_input_vectors.get() + i * vector_size,
                     vectors[i]->data(), vector_size * sizeof(size_t),
                     cudaMemcpyHostToDevice),
          "cudaMemcpy(host -> device)");
  }

  // One thread per output element; ceil-divide so the tail is covered.
  const unsigned int blockSize = 256;
  const unsigned int numBlocks =
      static_cast<unsigned int>((vector_size + blockSize - 1) / blockSize);
  add_vectors_kernel<<<numBlocks, blockSize>>>(
      d_input_vectors.get(), d_result.get(), num_vectors, vector_size);
  // Kernel launches do not return a status; query it explicitly.
  check(cudaGetLastError(), "kernel launch");

  auto result = std::make_shared<std::vector<size_t>>(vector_size);

  // Blocking copy: synchronizes with the kernel and also surfaces any
  // asynchronous execution error from it.
  check(cudaMemcpy(result->data(), d_result.get(),
                   vector_size * sizeof(size_t), cudaMemcpyDeviceToHost),
        "cudaMemcpy(device -> host)");

  return result;
}

// NOLINTEND
