#include "math_ops.h"
#include <hip/hip_runtime.h>
#include <iostream>

template<typename DTYPE>
__global__ void add_kernel(DTYPE* a, DTYPE* b, DTYPE* result, int num_elements) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < num_elements) {
        result[idx] = a[idx] + b[idx];
    }
}


// Elementwise GPU addition of two tensors: result[i] = a[i] + b[i].
// Returns a default-constructed Tensor when the shapes differ, and an
// unmodified (uninitialized) result buffer for unsupported data types.
// Synchronous: blocks until the kernel finishes before returning.
Tensor add_gpu(Tensor a, Tensor b) {
    if (a.dims != b.dims) {
        return Tensor();
    }

    Tensor result = allocateTensorOnGPU(a.dims, a.data_type);

    // NOTE(review): a.num_dims is used as the total element count here and in
    // the kernel arguments below (same convention as the rest of this file).
    // If num_dims is actually the tensor rank rather than the element count,
    // only the first num_dims elements get processed — confirm against the
    // Tensor definition.
    dim3 threads_per_block(256, 1, 1);
    // Ceil-divide so a partial final block covers the tail.
    dim3 num_blocks((a.num_dims + threads_per_block.x - 1) / threads_per_block.x, 1, 1);

    switch (a.data_type) {
        case DataType::FLOAT32:
            add_kernel<<<num_blocks, threads_per_block>>>(
            (float*)a.data, (float*)b.data, (float*)result.data, a.num_dims);
            break;
        case DataType::INT32:
            add_kernel<<<num_blocks, threads_per_block>>>(
            (int*)a.data, (int*)b.data, (int*)result.data, a.num_dims);
            break;  // was missing: previously fell through to default
        default:
            break;
    }
    // Kernel launches don't return errors directly; check for launch-config
    // failures explicitly, then wait so the result is ready for the caller.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << "add_gpu: kernel launch failed: "
                  << hipGetErrorString(err) << std::endl;
    }
    hipDeviceSynchronize();
    return result;
}

// Serial CPU implementation of elementwise addition:
// result[k] = a[k] + b[k] for every k in [0, num_elements).
// All three buffers must hold at least num_elements values.
template <typename DTYPE>
void add_host(DTYPE *a, DTYPE *b, DTYPE *result,  int num_elements) {
    int k = 0;
    while (k < num_elements) {
        result[k] = a[k] + b[k];
        ++k;
    }
}

// Elementwise CPU addition of two tensors: result[i] = a[i] + b[i].
// Returns a default-constructed Tensor when the shapes differ, and an
// unmodified (uninitialized) result buffer for unsupported data types.
Tensor add_cpu(Tensor a, Tensor b) {
    if (a.dims != b.dims) {
        return Tensor();
    }

    Tensor result = allocateTensorOnCPU(a.dims, a.data_type);
    // NOTE(review): a.num_dims is passed as the element count (same convention
    // as the rest of this file). If it is actually the tensor rank, only the
    // first num_dims elements get added — confirm against the Tensor definition.
    switch (a.data_type) {
        case DataType::FLOAT32:
            add_host((float*)a.data, (float*)b.data, (float*)result.data, a.num_dims);
            break;
        case DataType::INT32:
            add_host((int*)a.data, (int*)b.data, (int*)result.data, a.num_dims);
            break;  // was missing: previously fell through to default
        default:
            break;
    }
    // Removed the hipDeviceSynchronize() that was here: this path does no
    // device work, so the sync was a pointless host-side stall.
    return result;
}

// Serial CPU sigmoid: result[k] = 1 / (1 + e^(-a[k])) for every k in
// [0, num_elements). Computation uses float literals, matching the
// FLOAT32-only dispatch in sigmoid_cpu.
template <typename DTYPE>
void sigmoid_host(DTYPE *a, DTYPE *result, int num_elements) {
    for (int k = 0; k < num_elements; k++) {
        const DTYPE x = a[k];
        result[k] = 1.0f / (1.0f + std::exp(-x));
    }
}

// Elementwise CPU sigmoid of a tensor. Only FLOAT32 is supported; for any
// other data type the result buffer is returned unmodified (uninitialized).
Tensor sigmoid_cpu(Tensor a) {
    Tensor result = allocateTensorOnCPU(a.dims, a.data_type);
    // NOTE(review): a.num_dims is passed as the element count (same convention
    // as the rest of this file) — confirm against the Tensor definition.
    switch (a.data_type) {
        case DataType::FLOAT32:
            sigmoid_host((float*)a.data, (float*)result.data, a.num_dims);
            break;
        default:
            break;
    }
    // Removed the unused `num_elements` local and the hipDeviceSynchronize()
    // that was here: this path does no device work, so the sync was a
    // pointless host-side stall.
    return result;
}

// Elementwise sigmoid kernel: result[i] = 1 / (1 + e^(-a[i])) for every i
// in [0, num_elements). Expects a 1-D launch; threads past the end of the
// data return immediately.
template<typename DTYPE>
__global__ void sigmoid_kernel(DTYPE* a, DTYPE* result, int num_elements) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_elements) {
        return;
    }
    result[i] = 1.0f / (1.0f + exp(-a[i]));
}

// Elementwise GPU sigmoid of a tensor. Only FLOAT32 is supported; for any
// other data type the result buffer is returned unmodified (uninitialized).
// Synchronous: blocks until the kernel finishes before returning.
Tensor sigmoid_gpu(Tensor a) {
    Tensor result = allocateTensorOnGPU(a.dims, a.data_type);

    // NOTE(review): a.num_dims is used as the total element count here and in
    // the kernel arguments below (same convention as the rest of this file) —
    // confirm against the Tensor definition.
    dim3 threads_per_block(256, 1, 1);
    // Ceil-divide so a partial final block covers the tail.
    dim3 num_blocks((a.num_dims + threads_per_block.x - 1) / threads_per_block.x, 1, 1);

    switch (a.data_type) {
        case DataType::FLOAT32:
            sigmoid_kernel<<<num_blocks, threads_per_block>>>(
            (float*)a.data, (float*)result.data, a.num_dims);
            break;
        default:
            break;
    }
    // Kernel launches don't return errors directly; check for launch-config
    // failures explicitly, then wait so the result is ready for the caller.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << "sigmoid_gpu: kernel launch failed: "
                  << hipGetErrorString(err) << std::endl;
    }
    hipDeviceSynchronize();
    return result;
}