#ifndef DENSELAYER_H
#define DENSELAYER_H

#include "layer.h"
#include "matrix_operations.h"
#include "activation_function.h"
#include <vector>
#include <cmath> // For activation functions
#include <memory>
#include <random>
#include <chrono>

/// Fully-connected (dense) layer with Adam-based parameter updates.
/// Weights are stored row-major as [inputSize][outputSize]; forward()
/// caches the batch input and pre-activation values needed by backward().
class DenseLayer : public Layer {
private:
    std::vector<std::vector<float>> weights;   // [inputSize][outputSize]
    std::vector<float> biases;                 // [outputSize]
    std::vector<std::vector<float>> inputs;    // cached batch input for backprop
    std::vector<std::vector<float>> zOutputs;  // pre-activation outputs (z = xW + b)
    std::vector<std::vector<float>> outputs;   // post-activation outputs
    std::shared_ptr<MatrixOperations> matOps;
    std::shared_ptr<ActivationFunction> activationFunction;

    // Adam optimizer state
    std::vector<std::vector<float>> mWeights, vWeights; // first/second moment estimates for weights
    std::vector<float> mBiases, vBiases;                // first/second moment estimates for biases
    float beta1 = 0.9f;
    float beta2 = 0.999f;
    float epsilon = 1e-8f;
    // Update-step counter, used for Adam bias correction.
    int t = 0;

public:
    float learningRate;
    /// Constructs the layer and immediately initializes weights/biases
    /// and the Adam moment buffers via init().
    /// @param inputSize       number of input features
    /// @param outputSize      number of output units
    /// @param lr              learning rate for the Adam update
    /// @param activationFunc  activation (may be null for a linear layer)
    /// @param matOp           matrix-operation backend (shared across layers)
    // Initializer list follows member declaration order (matOps,
    // activationFunction, then learningRate): members are always
    // initialized in declaration order, and matching it avoids -Wreorder.
    DenseLayer(size_t inputSize, size_t outputSize, float lr, std::shared_ptr<ActivationFunction> activationFunc, std::shared_ptr<MatrixOperations> matOp)
        : matOps(std::move(matOp)), activationFunction(std::move(activationFunc)), learningRate(lr) {
            init(inputSize, outputSize);
        }
    void init(size_t inputSize, size_t outputSize) override;
    std::vector<std::vector<float>> forward(const std::vector<std::vector<float>>& inputs) override;
    std::vector<std::vector<float>> backward(const std::vector<std::vector<float>>& outputErrorGradient) override;
};




void DenseLayer::init(size_t inputSize, size_t outputSize) {
    std::random_device rd;
    std::mt19937 gen(rd());
    std::normal_distribution<> d;

    if(activationFunction != nullptr){
        std::string activationType = activationFunction->getType();
        if (activationType == "ReLU") {
            // 使用He初始化
            d = std::normal_distribution<>(0, std::sqrt(2.0 / inputSize));
        } else if (activationType == "Sigmoid" || activationType == "Tanh") {
            // 使用Xavier初始化
            d = std::normal_distribution<>(0, std::sqrt(1.0 / (inputSize + outputSize)));
        } else {
            // 默认初始化（可以是Xavier或其他）
            d = std::normal_distribution<>(0, std::sqrt(1.0 / (inputSize + outputSize)));
        }
    }else{
        d = std::normal_distribution<>(0, std::sqrt(2.0 / inputSize));
    }

    weights.resize(inputSize, std::vector<float>(outputSize));
    biases.resize(outputSize, 0.0); // 偏置通常可以初始化为0

    for (auto& row : weights) {
        for (auto& weight : row) {
            weight = d(gen);
        }
    }
    // 偏置已经初始化为0
    // 初始化m和v为0
    mWeights.resize(inputSize, std::vector<float>(outputSize, 0));
    vWeights.resize(inputSize, std::vector<float>(outputSize, 0));
    mBiases.resize(outputSize, 0);
    vBiases.resize(outputSize, 0);
}


/// Forward pass for a batch: output = activation(inputs * weights + biases).
/// Caches `inputs` and the pre-activation values `zOutputs` (when an
/// activation is present) for use by backward().
/// @param inputs  batch matrix, one row per sample, [batch][inputSize]
/// @return        activated outputs, [batch][outputSize]
// `inline` is required: header-only definition — without it, including
// this header in more than one translation unit violates the ODR.
inline std::vector<std::vector<float>> DenseLayer::forward(const std::vector<std::vector<float>>& inputs) {
    // Cache the input; backward() needs it to compute weight gradients.
    this->inputs = inputs;

    // Weighted sum: z = inputs * weights.
    auto weightedInputs = matOps->multiply(inputs, weights);

    // Add the bias to every row of the batch.
    for (auto& row : weightedInputs) {
        for (size_t i = 0; i < row.size(); i++) {
            row[i] += biases[i];
        }
    }

    if (activationFunction == nullptr) {
        // Linear layer: the affine result is the output.
        this->outputs = weightedInputs;
    } else {
        // Keep the pre-activation values; backward() evaluates the
        // activation derivative at z, not at the activated output.
        this->zOutputs = weightedInputs;
        this->outputs = activationFunction->apply(zOutputs);
    }
    return this->outputs;
}


/// Backward pass: computes gradients, applies one Adam update step to the
/// weights and biases, and returns the gradient with respect to the input
/// (to be propagated to the previous layer).
/// @param outputErrorGradient  dL/dOutput for the batch, [batch][outputSize]
/// @return                     dL/dInput, [batch][inputSize]
// `inline` is required: header-only definition — without it, including
// this header in more than one translation unit violates the ODR.
inline std::vector<std::vector<float>> DenseLayer::backward(const std::vector<std::vector<float>>& outputErrorGradient) {
    std::vector<std::vector<float>> delta;
    if (activationFunction != nullptr) {
        // Chain rule: delta = dL/dOutput ⊙ activation'(z), evaluated at the
        // pre-activation values cached by forward().
        auto activationDerivative = activationFunction->applyDerivative(this->zOutputs);
        delta = matOps->elementwiseMultiply(outputErrorGradient, activationDerivative);
    } else {
        // Linear layer: the incoming gradient is already delta.
        delta = outputErrorGradient;
    }

    // Weight gradient: dL/dW = inputs^T * delta.
    auto inputsTransposed = matOps->transpose(inputs);
    auto weightGradients = matOps->multiply(inputsTransposed, delta);

    // Bias gradient: sum delta over the batch dimension.
    std::vector<float> biasGradients(biases.size(), 0.0f);
    for (const auto& row : delta) {
        for (size_t i = 0; i < row.size(); ++i) {
            biasGradients[i] += row[i];
        }
    }

    t++; // advance the Adam step counter (used for bias correction)

    // Bias-correction denominators are loop-invariant — hoist the pow()
    // calls out instead of recomputing them for every single parameter.
    const float mCorrection = 1.0f - static_cast<float>(std::pow(beta1, t));
    const float vCorrection = 1.0f - static_cast<float>(std::pow(beta2, t));

    // Adam update for the weights.
    for (size_t i = 0; i < weights.size(); ++i) {
        for (size_t j = 0; j < weights[i].size(); ++j) {
            const float g = weightGradients[i][j];
            // Exponential moving averages of the gradient and its square
            // (g*g instead of std::pow(g, 2) — same value, far cheaper).
            mWeights[i][j] = beta1 * mWeights[i][j] + (1 - beta1) * g;
            vWeights[i][j] = beta2 * vWeights[i][j] + (1 - beta2) * g * g;

            // Bias-corrected moment estimates.
            const float mHat = mWeights[i][j] / mCorrection;
            const float vHat = vWeights[i][j] / vCorrection;

            weights[i][j] -= learningRate * mHat / (std::sqrt(vHat) + epsilon);
        }
    }

    // Adam update for the biases (same scheme as the weights).
    for (size_t i = 0; i < biases.size(); ++i) {
        const float g = biasGradients[i];
        mBiases[i] = beta1 * mBiases[i] + (1 - beta1) * g;
        vBiases[i] = beta2 * vBiases[i] + (1 - beta2) * g * g;

        const float mHat = mBiases[i] / mCorrection;
        const float vHat = vBiases[i] / vCorrection;

        biases[i] -= learningRate * mHat / (std::sqrt(vHat) + epsilon);
    }

    // Gradient w.r.t. the input, for the previous layer: dL/dX = delta * W^T.
    auto weightsTransposed = matOps->transpose(weights);
    auto inputErrorGradients = matOps->multiply(delta, weightsTransposed);

    return inputErrorGradients;
}

#endif // DENSELAYER_H
