#pragma once
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <functional>
#include <random>
#include <stdexcept>
#include <string>
#include <vector>
#include "Dashun.h"

/* 卷积层类 */
/* 2-D convolutional layer.
 *
 * Tensors are NCHW, flattened into 1-D float vectors.
 * Weights are stored flat as [out_channels][in_channels][kernel_h][kernel_w],
 * bias as [out_channels].
 *
 * forward() caches the input and the pre-activation output so that backward()
 * can compute gradients; backward() averages the parameter gradients over the
 * batch and immediately applies an SGD step via update().
 */
class Conv: public ILayerConvolutional {
public:
    /* Construct with randomly initialized weights and zero bias.
     *
     * @param in_channels     number of input feature maps
     * @param out_channels    number of output feature maps
     * @param kernel_height   kernel height in pixels
     * @param kernel_width    kernel width in pixels
     * @param stride          step between consecutive kernel placements
     * @param padding         implicit zero padding on each spatial border
     * @param learning_rate   SGD step size used by update()
     * @param activation_func element-wise nonlinearity applied after the bias
     */
    Conv(
        int in_channels,
        int out_channels,
        int kernel_height,
        int kernel_width,
        int stride = 1,
        int padding = 0,
        float learning_rate = 0.01f,
        ActivationFunc activation_func = ActivationFunc::ReLU
    ) : in_channels(in_channels),
        out_channels(out_channels),
        kernel_height(kernel_height),
        kernel_width(kernel_width),
        stride(stride),
        padding(padding),
        learning_rate(learning_rate),
        activation_func(activation_func) {

        initializeWeights();
        initializeBias();
    }

    /* Construct from existing parameters (e.g. a previously saved model).
     *
     * @param weights flat [out_channels][in_channels][kernel_h][kernel_w]
     * @param bias    flat [out_channels]
     * @throws std::invalid_argument if either vector's size does not match
     *         the layer geometry
     */
    Conv(
        int in_channels,
        int out_channels,
        int kernel_height,
        int kernel_width,
        int stride,
        int padding,
        float learning_rate,
        ActivationFunc activation_func,
        const std::vector<float>& weights,
        const std::vector<float>& bias
    ) : in_channels(in_channels),
        out_channels(out_channels),
        kernel_height(kernel_height),
        kernel_width(kernel_width),
        stride(stride),
        padding(padding),
        learning_rate(learning_rate),
        activation_func(activation_func),
        weights(weights),
        bias(bias) {

        validateParameterSizes(weights, bias);
    }

    /* Factory method; forwards to the random-initialization constructor. */
    static Conv Create(
        int in_channels,
        int out_channels,
        int kernel_height,
        int kernel_width,
        int stride = 1,
        int padding = 0,
        float learning_rate = 0.01f,
        ActivationFunc activation_func = ActivationFunc::ReLU
    ) {
        return Conv(in_channels, out_channels, kernel_height, kernel_width,
                                stride, padding, learning_rate, activation_func);
    }

    /* Replace the layer parameters.
     *
     * @throws std::invalid_argument if either vector's size does not match
     *         the layer geometry
     */
    void setWeights(const std::vector<float>& weights, const std::vector<float>& bias) {
        validateParameterSizes(weights, bias);
        this->weights = weights;
        this->bias = bias;
    }

    /* Forward pass (direct convolution, no GEMM).
     *
     * @param input        flat [batch, in_channels, input_height, input_width]
     * @param batch_size   number of samples in the batch
     * @param input_height spatial height of each input map
     * @param input_width  spatial width of each input map
     * @return flat [batch, out_channels, output_height, output_width]
     * @throws std::invalid_argument if the kernel does not fit the padded input
     */
    std::vector<float> forward(const std::vector<float>& input,
                              int batch_size,
                              int input_height,
                              int input_width) {

        // Cache the input shape and the raw input for backward().
        this->batch_size = batch_size;
        this->input_height = input_height;
        this->input_width = input_width;
        this->input = input;

        // Standard convolution output geometry.
        output_height = (input_height + 2 * padding - kernel_height) / stride + 1;
        output_width = (input_width + 2 * padding - kernel_width) / stride + 1;
        if (output_height < 1 || output_width < 1) {
            throw std::invalid_argument("Kernel does not fit the padded input");
        }

        // NOTE(review): a previous version also ran im2col() here, but the
        // direct loops below never consume the column buffer, so that (large)
        // allocation and copy were pure overhead; the call has been removed.

        // Fill the pre-activation buffer; it is cached so backward() can
        // evaluate the activation derivative at the correct point.
        pre_activation_output.assign(
            static_cast<std::size_t>(batch_size) * out_channels * output_height * output_width,
            0.0f);

        for (int b = 0; b < batch_size; ++b) {
            for (int oc = 0; oc < out_channels; ++oc) {
                for (int oh = 0; oh < output_height; ++oh) {
                    for (int ow = 0; ow < output_width; ++ow) {
                        float sum = 0.0f;

                        // Accumulate the convolution sum over all input
                        // channels and kernel taps.
                        for (int ic = 0; ic < in_channels; ++ic) {
                            for (int kh = 0; kh < kernel_height; ++kh) {
                                for (int kw = 0; kw < kernel_width; ++kw) {
                                    int ih = oh * stride + kh - padding;
                                    int iw = ow * stride + kw - padding;

                                    // Taps that fall in the zero padding
                                    // contribute nothing.
                                    if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                                        int input_idx = ((b * in_channels + ic) * input_height + ih) * input_width + iw;
                                        int weight_idx = ((oc * in_channels + ic) * kernel_height + kh) * kernel_width + kw;

                                        sum += input[input_idx] * weights[weight_idx];
                                    }
                                }
                            }
                        }

                        int output_idx = ((b * out_channels + oc) * output_height + oh) * output_width + ow;
                        pre_activation_output[output_idx] = sum + bias[oc];
                    }
                }
            }
        }

        // The returned output is the activated copy of the cached
        // pre-activation values.
        std::vector<float> output = pre_activation_output;
        applyActivation(output);

        return output;
    }

    /* Backward pass.
     *
     * Computes the parameter gradients (averaged over the batch), applies an
     * SGD update, and returns the gradient with respect to the input.
     *
     * @param dout flat [batch, out_channels, output_height, output_width]
     *             gradient of the loss w.r.t. this layer's activated output
     * @return flat [batch, in_channels, input_height, input_width]
     */
    std::vector<float> backward(const std::vector<float>& dout) {
        // Chain through the activation: d(pre-act) = dout * f'(pre-act).
        // applyActivationDerivative is a no-op for ActivationFunc::None.
        std::vector<float> dout_activated = dout;
        applyActivationDerivative(dout_activated, pre_activation_output);

        // Zero-initialize the gradient accumulators.
        weights_grad = std::vector<float>(out_channels * in_channels * kernel_height * kernel_width, 0.0f);
        bias_grad = std::vector<float>(out_channels, 0.0f);

        // Accumulate weight and bias gradients over the batch.
        for (int b = 0; b < batch_size; ++b) {
            for (int oc = 0; oc < out_channels; ++oc) {
                for (int oh = 0; oh < output_height; ++oh) {
                    for (int ow = 0; ow < output_width; ++ow) {
                        int dout_idx = ((b * out_channels + oc) * output_height + oh) * output_width + ow;
                        float dout_val = dout_activated[dout_idx];

                        // dL/db[oc] is the sum of upstream gradients over all
                        // spatial positions and batch entries.
                        bias_grad[oc] += dout_val;

                        // dL/dW[oc][ic][kh][kw] += x[ih][iw] * dout.
                        for (int ic = 0; ic < in_channels; ++ic) {
                            for (int kh = 0; kh < kernel_height; ++kh) {
                                for (int kw = 0; kw < kernel_width; ++kw) {
                                    int ih = oh * stride + kh - padding;
                                    int iw = ow * stride + kw - padding;

                                    if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                                        int input_idx = ((b * in_channels + ic) * input_height + ih) * input_width + iw;
                                        int weight_grad_idx = ((oc * in_channels + ic) * kernel_height + kh) * kernel_width + kw;

                                        weights_grad[weight_grad_idx] += input[input_idx] * dout_val;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        // Average the parameter gradients over the batch.
        float batch_norm = 1.0f / batch_size;
        for (auto& grad : weights_grad) {
            grad *= batch_norm;
        }
        for (auto& grad : bias_grad) {
            grad *= batch_norm;
        }

        // Input gradient: full (transposed) convolution of dout with the
        // weights. For each input pixel, find the output positions that used
        // it; the divisibility check rejects positions that do not land on a
        // stride multiple.
        std::vector<float> dx(batch_size * in_channels * input_height * input_width, 0.0f);

        for (int b = 0; b < batch_size; ++b) {
            for (int ic = 0; ic < in_channels; ++ic) {
                for (int ih = 0; ih < input_height; ++ih) {
                    for (int iw = 0; iw < input_width; ++iw) {
                        float sum = 0.0f;

                        for (int oc = 0; oc < out_channels; ++oc) {
                            for (int kh = 0; kh < kernel_height; ++kh) {
                                for (int kw = 0; kw < kernel_width; ++kw) {
                                    int oh = (ih - kh + padding) / stride;
                                    int ow = (iw - kw + padding) / stride;

                                    if (oh >= 0 && oh < output_height && ow >= 0 && ow < output_width &&
                                        (ih - kh + padding) % stride == 0 &&
                                        (iw - kw + padding) % stride == 0) {

                                        int dout_idx = ((b * out_channels + oc) * output_height + oh) * output_width + ow;
                                        int weight_idx = ((oc * in_channels + ic) * kernel_height + kh) * kernel_width + kw;

                                        sum += dout_activated[dout_idx] * weights[weight_idx];
                                    }
                                }
                            }
                        }

                        int dx_idx = ((b * in_channels + ic) * input_height + ih) * input_width + iw;
                        dx[dx_idx] = sum;
                    }
                }
            }
        }

        // Apply the SGD step before returning.
        update();

        return dx;
    }

    /* Apply one SGD step: p -= learning_rate * grad. Expects backward() to
     * have populated weights_grad / bias_grad. */
    void update() {
        for (size_t i = 0; i < weights.size(); ++i) {
            weights[i] -= learning_rate * weights_grad[i];
        }

        for (int i = 0; i < out_channels; ++i) {
            bias[i] -= learning_rate * bias_grad[i];
        }
    }

    /* Human-readable summary of the layer hyperparameters. */
    std::string getInfo() const {
        return "Conv Layer: in_channels=" + std::to_string(in_channels) +
               ", out_channels=" + std::to_string(out_channels) +
               ", kernel_size=(" + std::to_string(kernel_height) + "," + std::to_string(kernel_width) + ")" +
               ", stride=" + std::to_string(stride) +
               ", padding=" + std::to_string(padding);
    }

private:
    // Layer hyperparameters.
    int in_channels;
    int out_channels;
    int kernel_height;
    int kernel_width;
    int stride;
    int padding;
    float learning_rate;
    ActivationFunc activation_func;

    // Parameters (flat storage).
    std::vector<float> weights;  // [out_channels][in_channels][kernel_height][kernel_width]
    std::vector<float> bias;     // [out_channels]

    // Caches populated by forward() for use in backward().
    std::vector<float> input;                 // raw input, flat NCHW
    std::vector<float> col;                   // im2col buffer (currently unused; see im2col())
    std::vector<float> pre_activation_output; // output before the activation

    // Gradients populated by backward().
    std::vector<float> weights_grad;
    std::vector<float> bias_grad;

    // Shape information cached by forward().
    int batch_size;
    int input_height;
    int input_width;
    int output_height;
    int output_width;

    /* Throw std::invalid_argument if the given parameter vectors do not match
     * this layer's geometry. */
    void validateParameterSizes(const std::vector<float>& w, const std::vector<float>& b) const {
        const std::size_t expected_weights_size =
            static_cast<std::size_t>(out_channels) * in_channels * kernel_height * kernel_width;
        if (w.size() != expected_weights_size) {
            throw std::invalid_argument("Weights size mismatch");
        }

        if (b.size() != static_cast<std::size_t>(out_channels)) {
            throw std::invalid_argument("Bias size mismatch");
        }
    }

    /* Initialize weights with a uniform distribution scaled by
     * sqrt(2 / (fan_in + fan_out)) — a Xavier/Glorot-style scheme.
     * NOTE(review): the classic Glorot-uniform limit is sqrt(6/(fan_in+fan_out));
     * this uses sqrt(2/...) — confirm the intended scheme. */
    void initializeWeights() {
        long total_weights = out_channels * in_channels * kernel_height * kernel_width;
        weights.resize(total_weights);

        float scale = std::sqrt(2.0f / (in_channels * kernel_height * kernel_width +
                                      out_channels * kernel_height * kernel_width));

        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<float> dist(-scale, scale);

        for (long i = 0; i < total_weights; ++i) {
            weights[i] = dist(gen);
        }
    }

    /* Initialize all biases to zero. */
    void initializeBias() {
        bias.resize(out_channels, 0.0f);
    }

    /* im2col transform: unrolls each receptive field into a column so the
     * convolution could be expressed as a matrix multiply.
     * NOTE(review): currently unused — forward() performs direct convolution
     * and never reads `col`. Kept for a possible GEMM-based path. */
    void im2col(const std::vector<float>& input) {
        output_height = (input_height + 2 * padding - kernel_height) / stride + 1;
        output_width = (input_width + 2 * padding - kernel_width) / stride + 1;

        long col_size = batch_size * (in_channels * kernel_height * kernel_width) * (output_height * output_width);
        col.resize(col_size, 0.0f);

        for (int b = 0; b < batch_size; ++b) {
            for (int ic = 0; ic < in_channels; ++ic) {
                for (int kh = 0; kh < kernel_height; ++kh) {
                    for (int kw = 0; kw < kernel_width; ++kw) {
                        int col_channel_idx = ((b * in_channels + ic) * kernel_height + kh) * kernel_width + kw;

                        for (int oh = 0; oh < output_height; ++oh) {
                            for (int ow = 0; ow < output_width; ++ow) {
                                int ih = oh * stride + kh - padding;
                                int iw = ow * stride + kw - padding;

                                int col_idx = col_channel_idx * (output_height * output_width) + oh * output_width + ow;

                                if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                                    int input_idx = ((b * in_channels + ic) * input_height + ih) * input_width + iw;
                                    col[col_idx] = input[input_idx];
                                } else {
                                    col[col_idx] = 0.0f; // zero padding
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    /* Apply the configured activation element-wise, in place. */
    void applyActivation(std::vector<float>& output) {
        if (activation_func == ActivationFunc::None) {
            return;
        }

        for (float& val : output) {
            switch (activation_func) {
                case ActivationFunc::ReLU:
                    val = std::max(0.0f, val);
                    break;
                case ActivationFunc::Sigmoid:
                    val = 1.0f / (1.0f + std::exp(-val));
                    break;
                case ActivationFunc::Tanh:
                    val = std::tanh(val);
                    break;
                default:
                    break;
            }
        }
    }

    /* Multiply dout (in place) by the activation derivative evaluated at the
     * cached pre-activation values.
     *
     * BUGFIX: the derivative must be evaluated on the *activated* value:
     * sigma'(z) = sigma(z)*(1-sigma(z)) and tanh'(z) = 1-tanh(z)^2. The
     * previous code plugged the raw pre-activation z into those formulas
     * (z*(1-z), 1-z*z), producing wrong — even negative — "derivatives". */
    void applyActivationDerivative(std::vector<float>& dout, const std::vector<float>& pre_activation) {
        if (activation_func == ActivationFunc::None) {
            return;
        }

        for (size_t i = 0; i < dout.size(); ++i) {
            const float z = pre_activation[i];
            float derivative = 1.0f;

            switch (activation_func) {
                case ActivationFunc::ReLU:
                    derivative = (z > 0.0f) ? 1.0f : 0.0f;
                    break;
                case ActivationFunc::Sigmoid: {
                    const float s = 1.0f / (1.0f + std::exp(-z));
                    derivative = s * (1.0f - s);
                    break;
                }
                case ActivationFunc::Tanh: {
                    const float t = std::tanh(z);
                    derivative = 1.0f - t * t;
                    break;
                }
                default:
                    break;
            }

            dout[i] *= derivative;
        }
    }
};