#include "conv.h"
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>

// CUDA核函数，执行卷积运算（HWC布局，batch为1）
// CUDA kernel: direct convolution forward pass (batch size 1).
//
// Expected launch configuration (set by convolution_forward_gpu):
//   gridDim.x  = output_channels  -> blockIdx.x  selects the output channel
//   gridDim.y  = output_height    -> blockIdx.y  selects the output row
//   blockDim.x = output_width     -> threadIdx.x selects the output column
//
// Layouts:
//   output: HWC — index (y * W + x) * C + oc (matches the {H, W, C} allocation).
//   kernel: [ic][ky][kx][oc] — innermost stride is output_channels.
//   input:  indexed channel-major (CHW) below. NOTE(review): the file header
//           says HWC; the read pattern here is (ic * H + y) * W + x, which is
//           CHW — confirm the actual input layout. TODO verify.
__global__ void convolution_forward_kernel(float* input, float* kernel, float* output,
                                            int input_channels, int input_height, int input_width,
                                            int kernel_size, int output_channels, int output_height, int output_width,
                                            int stride, int padding) {
    // Decode this thread's output position from the launch geometry above.
    int oc    = blockIdx.x;
    int out_y = blockIdx.y;
    int out_x = threadIdx.x;

    // Guard against any over-provisioned launch.
    if (oc >= output_channels || out_y >= output_height || out_x >= output_width) return;

    // Accumulate the weighted sum over all input channels and kernel taps.
    float sum = 0.0f;
    for (int ic = 0; ic < input_channels; ++ic) {
        for (int ky = 0; ky < kernel_size; ++ky) {
            for (int kx = 0; kx < kernel_size; ++kx) {
                // Map the kernel tap back to input coordinates (with padding).
                int in_y = out_y * stride - padding + ky;
                int in_x = out_x * stride - padding + kx;
                // Out-of-bounds taps contribute zero (implicit zero padding).
                if (in_y >= 0 && in_y < input_height && in_x >= 0 && in_x < input_width) {
                    int input_idx = (ic * input_height + in_y) * input_width + in_x;
                    // Offset by oc: each tap stores output_channels weights contiguously.
                    int kernel_idx = ((ic * kernel_size + ky) * kernel_size + kx) * output_channels + oc;
                    sum += input[input_idx] * kernel[kernel_idx];
                }
            }
        }
    }

    // HWC output: channel is the innermost (fastest-varying) dimension.
    int output_idx = (out_y * output_width + out_x) * output_channels + oc;
    output[output_idx] = sum;
}

// CPU reference implementation of the convolution forward pass (batch size 1).
//
// Layouts (mirrors the GPU kernel):
//   output: HWC — index (y * W + x) * C + oc.
//   kernel: [ic][ky][kx][oc] — innermost stride is output_channels.
//   input:  indexed channel-major, (ic * H + y) * W + x.
//           NOTE(review): the file header says HWC; this read pattern is CHW —
//           confirm the actual input layout. TODO verify.
//
// All pointers must reference host-accessible memory of the sizes implied by
// the dimension arguments; out-of-image kernel taps contribute zero (padding).
void convolution_forward_host(float* input, float* kernel, float* output,
                             int input_channels, int input_height, int input_width,
                             int kernel_size, int output_channels, int output_height, int output_width,
                             int stride, int padding) {
    // Every (oc, y, x) output element is computed independently.
    for (int oc = 0; oc < output_channels; ++oc) {
        for (int out_y = 0; out_y < output_height; ++out_y) {
            for (int out_x = 0; out_x < output_width; ++out_x) {
                float sum = 0.0f;
                // Accumulate over input channels and kernel window.
                for (int ic = 0; ic < input_channels; ++ic) {
                    for (int ky = 0; ky < kernel_size; ++ky) {
                        for (int kx = 0; kx < kernel_size; ++kx) {
                            int in_y = out_y * stride - padding + ky;
                            int in_x = out_x * stride - padding + kx;
                            // Skip taps that fall outside the input (implicit zero padding).
                            if (in_y >= 0 && in_y < input_height && in_x >= 0 && in_x < input_width) {
                                int input_idx = (ic * input_height + in_y) * input_width + in_x;
                                // Offset by oc: each tap stores output_channels weights contiguously.
                                int kernel_idx = ((ic * kernel_size + ky) * kernel_size + kx) * output_channels + oc;
                                sum += input[input_idx] * kernel[kernel_idx];
                            }
                        }
                    }
                }
                // HWC output: channel is the innermost dimension.
                int output_idx = (out_y * output_width + out_x) * output_channels + oc;
                output[output_idx] = sum;
            }
        }
    }
}

// GPU entry point: runs the convolution forward pass on device memory and
// returns a freshly allocated {H, W, C} output tensor on the GPU.
// input.data / kernel.data must be device pointers to float data.
Tensor convolution_forward_gpu(Tensor input, Tensor kernel, ConvolutionParams params) {
    // Tensor dims are read as {height, width, channels} (HWC).
    int input_channels = input.dims[2];
    int input_height = input.dims[0];
    int input_width = input.dims[1];
    int kernel_size = kernel.dims[2];
    int output_channels = kernel.dims[0];
    // Standard convolution output-size formula.
    int output_height = (input_height + 2 * params.padding - kernel_size) / params.stride + 1;
    int output_width = (input_width + 2 * params.padding - kernel_size) / params.stride + 1;

    // Allocate the output tensor on the GPU (HWC layout).
    Tensor output = allocateTensorOnGPU({output_height, output_width, output_channels}, input.data_type);

    // Launch geometry: one block per (output channel, output row), one thread
    // per output column. NOTE(review): blockDim.x is capped at 1024 on current
    // devices; a larger output_width makes the launch fail — the error check
    // below reports that instead of failing silently.
    dim3 threads_per_block(output_width, 1, 1);
    dim3 num_blocks(output_channels, output_height, 1);

    convolution_forward_kernel<<<num_blocks, threads_per_block>>>(
        (float*)input.data, (float*)kernel.data, (float*)output.data,
        input_channels, input_height, input_width, kernel_size, output_channels, output_height, output_width,
        params.stride, params.padding);

    // Kernel launches do not return errors directly: catch bad launch
    // configurations here, then catch asynchronous execution errors on sync.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "convolution_forward_gpu: kernel launch failed: "
                  << cudaGetErrorString(err) << std::endl;
    }
    err = cudaDeviceSynchronize();  // wait for the kernel to finish
    if (err != cudaSuccess) {
        std::cerr << "convolution_forward_gpu: kernel execution failed: "
                  << cudaGetErrorString(err) << std::endl;
    }

    return output;
}

// CPU entry point: computes the convolution with the host-side loop in
// convolution_forward_host and returns the result as a Tensor.
Tensor convolution_forward_cpu(Tensor input, Tensor kernel, ConvolutionParams params) {
    // Tensor dims are read as {height, width, channels} (HWC),
    // mirroring the GPU path.
    int input_channels = input.dims[2];
    int input_height = input.dims[0];
    int input_width = input.dims[1];
    int kernel_size = kernel.dims[2];
    int output_channels = kernel.dims[0];
    // Standard convolution output-size formula.
    int output_height = (input_height + 2 * params.padding - kernel_size) / params.stride + 1;
    int output_width = (input_width + 2 * params.padding - kernel_size) / params.stride + 1;

    // NOTE(review): the output is allocated with allocateTensorOnGPU, yet
    // convolution_forward_host below writes to output.data from the CPU (and
    // reads input.data / kernel.data). This is only valid if the allocator
    // returns host-accessible memory (e.g. cudaMallocManaged); otherwise this
    // dereferences device pointers on the host. TODO confirm the allocator's
    // memory space, or allocate this CPU-path output on the host instead.
    Tensor output = allocateTensorOnGPU({output_height, output_width, output_channels}, input.data_type);

    convolution_forward_host(
        (float*)input.data, (float*)kernel.data, (float*)output.data,
        input_channels, input_height, input_width, kernel_size, output_channels, output_height, output_width,
        params.stride, params.padding);

    cudaDeviceSynchronize();  // NOTE(review): no kernel is launched on this path; likely unnecessary.

    return output;
}
