#include "Conv2d.h"

using namespace wawa;


/// Constructs a 2-D convolution layer.
///
/// @param in_channels  number of input feature maps
/// @param out_channels number of output feature maps (= number of kernels)
/// @param kernel_size  side length of the square K×K kernel
/// @param stride       step between adjacent output positions
/// @param padding      implicit zero padding applied on every side
/// @param bias         whether to allocate a per-output-channel bias vector
///
/// Weights are shaped (out_channels, in_channels, K, K) and initialized by
/// initialize_parameters(); biases, when enabled, start at zero.
Conv2d::Conv2d(
        size_t in_channels,
        size_t out_channels,
        size_t kernel_size,
        size_t stride,
        size_t padding,
        bool bias)
        : m_in_channels(in_channels),
        m_out_channels(out_channels),
        m_kernel_size(kernel_size),
        m_stride(stride),
        m_padding(padding),
        m_bias(bias),
        m_weights(out_channels, in_channels, kernel_size, kernel_size)
        {
            initialize_parameters();
            if (bias) {
                // Value-initialization ("()") zero-fills the array, replacing
                // the previous memset. Since C++17, shared_ptr<double[]>
                // already uses delete[] by default, so the explicit
                // std::default_delete<double[]> was redundant.
                m_bias_weights = std::shared_ptr<double[]>(new double[out_channels]());
            }
        }



/// Computes the spatial output size (H_out, W_out) of the convolution for an
/// input of size (H_in, W_in), using the standard formula
///   out = (in + 2*padding - kernel) / stride + 1.
///
/// @throws std::invalid_argument if the kernel does not fit inside the padded
///         input, or if the stride is zero.
std::pair<size_t, size_t> Conv2d::calculate_output_size(size_t H_in, size_t W_in) const {
    const size_t padded_h = H_in + 2 * m_padding;
    const size_t padded_w = W_in + 2 * m_padding;

    // Validate BEFORE subtracting: every operand is size_t, so
    // (padded - kernel) would wrap around to a huge value when the kernel is
    // larger than the padded input. The previous check `H_out <= 0` could
    // never fire for an unsigned type (and H_out is always >= 1 anyway).
    if (m_stride == 0 || padded_h < m_kernel_size || padded_w < m_kernel_size) {
        throw std::invalid_argument("Invalid convolution parameters");
    }

    size_t H_out = (padded_h - m_kernel_size) / m_stride + 1;
    size_t W_out = (padded_w - m_kernel_size) / m_stride + 1;
    return { H_out, W_out };
}



/// Runs the forward pass: validates the input depth, sizes the output tensor,
/// and performs the convolution.
///
/// @param input tensor whose channel count must equal in_channels
/// @return tensor of shape (N, out_channels, H_out, W_out)
/// @throws std::invalid_argument on channel mismatch or invalid geometry
///         (propagated from calculate_output_size).
Tensor Conv2d::forward(const Tensor& input) const {
    if (input.C() != m_in_channels) {
        throw std::invalid_argument("Input channels mismatch");
    }

    const std::pair<size_t, size_t> out_dims =
            calculate_output_size(input.H(), input.W());

    Tensor result(input.N(), m_out_channels, out_dims.first, out_dims.second);
    convolve(input, result);
    return result;
}


/// Initializes the convolution weights in place.
///
/// Maps each pre-existing weight value u to sqrt(-2 * ln(1 - u)) * std_dev,
/// where std_dev follows He-style scaling sqrt(2 / (C_in * K * K)).
///
/// NOTE(review): this assumes m_weights was pre-filled with samples in [0,1)
/// by the Tensor constructor — confirm. Also note the transform yields a
/// Rayleigh-distributed magnitude, not a full Box-Muller Gaussian (which
/// would also need a cos(2*pi*u2) factor) — confirm this is intentional.
void Conv2d::initialize_parameters() const {
    double std_dev = std::sqrt(2.0 / (m_in_channels * m_kernel_size * m_kernel_size));
    double* data = m_weights.data();
    size_t size = m_weights.size();

    // The previous AVX2 branch was incorrect: it reused the log of only the
    // FIRST element of each 4-wide block for all four lanes and multiplied in
    // a spurious (1 - u) factor, so it produced values that disagreed with
    // the scalar fallback. There is no AVX2 intrinsic for log (only SVML
    // provides one), and std::log dominates the cost anyway, so a single
    // scalar loop is the correct form; the compiler can still vectorize the
    // sqrt/multiply.
    for (size_t i = 0; i < size; ++i) {
        data[i] = std::sqrt(-2.0 * std::log(1.0 - data[i])) * std_dev;
    }
}


// Direct (naive) convolution: for every (batch, output channel, output
// pixel), accumulate the dot product of the K×K input window with the kernel
// across all input channels, then add the bias if enabled. Zero padding is
// implicit — see the bounds-check note below. `output` must already be sized
// to (N, out_channels, H_out, W_out) by the caller.
void Conv2d::convolve(const Tensor& input, Tensor& output) const {
    const size_t N = input.N();
    const size_t C_in = m_in_channels;
    const size_t H_in = input.H();
    const size_t W_in = input.W();
    const size_t C_out = m_out_channels;
    const size_t H_out = output.H();
    const size_t W_out = output.W();
    const size_t K = m_kernel_size;

    // Raw data pointers; the index arithmetic below implies NCHW layout
    // for tensors and (C_out, C_in, K, K) for weights.
    const double* input_data = input.data();
    const double* weight_data = m_weights.data();
    double* output_data = output.data();

    // Parallelize over the batch and output-channel dimensions. collapse(2)
    // requires the two outer loops to stay perfectly nested as written.
#pragma omp parallel for collapse(2)
    for (size_t n = 0; n < N; ++n) {                 // batch
        for (size_t oc = 0; oc < C_out; ++oc) {      // output channel
            // Kernel for this output channel: C_in consecutive K×K filters.
            const double* kernel = &weight_data[oc * C_in * K * K];

            for (size_t h = 0; h < H_out; ++h) {     // output row
                for (size_t w = 0; w < W_out; ++w) { // output column
                    double sum = 0.0;

                    // Top-left corner of the input window. size_t arithmetic
                    // wraps, so with padding > 0 these deliberately underflow
                    // to huge values for positions left/above the input; the
                    // single `< H_in` / `< W_in` check below then rejects
                    // them, implementing zero padding without a signed cast.
                    size_t h_start = h * m_stride - m_padding;
                    size_t w_start = w * m_stride - m_padding;

                    // Accumulate contributions from every input channel.
                    for (size_t ic = 0; ic < C_in; ++ic) {
                        // Base of this input channel's H_in×W_in plane.
                        const double* input_ch = &input_data[ n * C_in * H_in * W_in + ic * H_in * W_in];

                        // Base of the matching K×K filter slice.
                        const double* kernel_ch = &kernel[ic * K * K];

                        // Single-channel convolution over the window.
                        for (size_t kh = 0; kh < K; ++kh) {
                            for (size_t kw = 0; kw < K; ++kw) {
                                size_t h_in = h_start + kh;
                                size_t w_in = w_start + kw;

                                // One unsigned comparison covers both the
                                // "negative" (wrapped-to-huge) and
                                // past-the-end cases — this IS the padding.
                                if (h_in < H_in && w_in < W_in) {
                                    sum += input_ch[h_in * W_in + w_in] *
                                           kernel_ch[kh * K + kw];
                                }
                            }
                        }
                    }

                    if (m_bias) {
                        sum += m_bias_weights[oc];
                    }

                    // Write the accumulated value (NCHW indexing).
                    output_data[ n * C_out * H_out * W_out + oc * H_out * W_out + h * W_out + w] = sum;
                }
            }
        }
    }
}