#include "Layer.hpp"

#include <algorithm>
#include <cmath>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include <vector>

namespace qing {
    /**
     * Fully-connected (dense) layer: output = W * input + b.
     *
     * Weights are stored row-major as a flat vector of size
     * output_num x input_num (row i holds the weights feeding output i).
     * forward() caches its input so backward() can compute the weight
     * gradient dW = grad_output outer input.
     */
    class LinearBlock : public Layer {
    private:
        /* Parameters and their gradients. Plain vectors own their storage
         * (Rule of Zero) -- the previous unique_ptr<vector> wrappers added
         * a pointless heap indirection on every element access. */
        std::vector<double> weights;       // weight matrix, row-major: output_num x input_num
        std::vector<double> bias;          // bias vector: output_num
        std::vector<double> weights_grad;  // dL/dW, same layout as weights
        std::vector<double> bias_grad;     // dL/db: output_num

        /* Forward-pass cache: last input seen, consumed by backward(). */
        std::vector<double> input_cache;

        int input_num;     // input dimension (fan-in)
        int output_num;    // output dimension (fan-out)
        bool is_training;  // train/eval flag; kept to honor the Layer interface

        /* Xavier/Glorot-initialize the weights and zero the biases.
         * std = sqrt(2 / (fan_in + fan_out)). */
        void initialize_parameters() {
            double scale = std::sqrt(2.0 / (input_num + output_num));
            std::random_device rd;
            std::mt19937 gen(rd());
            std::normal_distribution<double> dist(0.0, scale);

            for (double& w : weights) {
                w = dist(gen);
            }
            std::fill(bias.begin(), bias.end(), 0.0);
        }

        /* Allocate all parameter/gradient/cache buffers, zero-filled.
         * weight_count arrives as long so large layers do not overflow. */
        void crt_layer(long weight_count) {
            weights.assign(static_cast<std::size_t>(weight_count), 0.0);
            bias.assign(static_cast<std::size_t>(output_num), 0.0);
            weights_grad.assign(static_cast<std::size_t>(weight_count), 0.0);
            bias_grad.assign(static_cast<std::size_t>(output_num), 0.0);
            input_cache.assign(static_cast<std::size_t>(input_num), 0.0);
        }

    public:
        /**
         * @param input_num   number of inputs (fan-in), must be > 0
         * @param output_num  number of outputs (fan-out), must be > 0
         * @throws std::invalid_argument if either dimension is non-positive
         */
        LinearBlock(int input_num, int output_num)
            : input_num(input_num), output_num(output_num), is_training(true) {
            if (input_num <= 0 || output_num <= 0) {
                throw std::invalid_argument("LinearBlock dimensions must be positive");
            }
            // Widen BEFORE multiplying so the weight count cannot overflow int.
            crt_layer(static_cast<long>(input_num) * output_num);
            initialize_parameters();
        }

        /**
         * Forward pass: output[i] = bias[i] + sum_j W[i][j] * input[j].
         * Caches the input for the subsequent backward() call.
         *
         * @param input  vector of length input_num
         * @return       vector of length output_num
         * @throws std::invalid_argument on size mismatch
         */
        std::vector<double> forward(const std::vector<double>& input) override {
            if (input.size() != static_cast<std::size_t>(input_num)) {
                throw std::invalid_argument("Input size does not match layer input dimension");
            }

            input_cache = input;  // needed by backward() for dW

            std::vector<double> output(output_num, 0.0);
            for (int i = 0; i < output_num; ++i) {
                // Dot row i of W with the input, then add the bias.
                const double* row = &weights[static_cast<std::size_t>(i) * input_num];
                double sum = bias[i];
                for (int j = 0; j < input_num; ++j) {
                    sum += input[j] * row[j];
                }
                output[i] = sum;
            }
            return output;
        }

        /**
         * Backward pass. Overwrites (does not accumulate) the stored
         * gradients:
         *   dW[i][j] = grad_output[i] * cached_input[j]
         *   db[i]    = grad_output[i]
         * and returns grad_input = W^T * grad_output.
         *
         * @param grad_output  gradient w.r.t. this layer's output, length output_num
         * @return             gradient w.r.t. this layer's input, length input_num
         * @throws std::invalid_argument on size mismatch
         */
        std::vector<double> backward(const std::vector<double>& grad_output) override {
            if (grad_output.size() != static_cast<std::size_t>(output_num)) {
                throw std::invalid_argument("Gradient output size does not match layer output dimension");
            }

            // Every gradient entry is assigned below, so the former
            // zero-fill pass over both buffers was redundant work.
            for (int i = 0; i < output_num; ++i) {
                bias_grad[i] = grad_output[i];
                double* grad_row = &weights_grad[static_cast<std::size_t>(i) * input_num];
                for (int j = 0; j < input_num; ++j) {
                    grad_row[j] = grad_output[i] * input_cache[j];
                }
            }

            // grad_input[j] = sum_i grad_output[i] * W[i][j]
            std::vector<double> grad_input(input_num, 0.0);
            for (int i = 0; i < output_num; ++i) {
                const double* row = &weights[static_cast<std::size_t>(i) * input_num];
                for (int j = 0; j < input_num; ++j) {
                    grad_input[j] += grad_output[i] * row[j];
                }
            }
            return grad_input;
        }

        /**
         * Vanilla SGD step: p -= learning_rate * grad(p) for every parameter.
         * Uses the gradients computed by the most recent backward() call.
         */
        void update_parameters(double learning_rate) override {
            for (std::size_t i = 0; i < weights.size(); ++i) {
                weights[i] -= learning_rate * weights_grad[i];
            }
            for (std::size_t i = 0; i < bias.size(); ++i) {
                bias[i] -= learning_rate * bias_grad[i];
            }
        }

        /// Layer type name for introspection/serialization.
        std::string get_layer_type() const override {
            return "Linear";
        }

        /// Input dimension (fan-in).
        int get_input_dim() const override {
            return input_num;
        }

        /// Output dimension (fan-out).
        int get_output_dim() const override {
            return output_num;
        }

        /// Switch between training and evaluation mode (no behavioral
        /// difference for a linear layer; stored for interface uniformity).
        void set_training(bool training) override {
            is_training = training;
        }

        /// Total learnable parameter count: weights + biases.
        int get_parameter_count() const override {
            return static_cast<int>(weights.size() + bias.size());
        }

        /// Read-only access to the weights (testing / model saving).
        const std::vector<double>& get_weights() const { return weights; }
        /// Read-only access to the biases (testing / model saving).
        const std::vector<double>& get_bias() const { return bias; }

        /**
         * Replace the weights (e.g. when loading a pretrained model).
         * @throws std::invalid_argument if the size does not match.
         */
        void set_weights(const std::vector<double>& new_weights) {
            if (new_weights.size() != weights.size()) {
                throw std::invalid_argument("Weights size mismatch");
            }
            weights = new_weights;
        }

        /**
         * Replace the biases (e.g. when loading a pretrained model).
         * @throws std::invalid_argument if the size does not match.
         */
        void set_bias(const std::vector<double>& new_bias) {
            if (new_bias.size() != bias.size()) {
                throw std::invalid_argument("Bias size mismatch");
            }
            bias = new_bias;
        }

        /// Print a one-line human-readable summary of this layer.
        void print_info() const {
            std::cout << "Linear Layer: " << input_num << " -> " << output_num
                    << " (Parameters: " << get_parameter_count() << ")" << std::endl;
        }
    };
}