/*
Fully connected layer implementation.
Implements the dense (fully connected) operation of a neural network,
including weight management and gradient computation.
*/
#include "FullConnectedLayer.h"
#include "ActivationFunction.h"
#include "LayerManager.h"
#include "LossFunction.h"
#include "PoolingLayer.h"
#include <algorithm>
#include <cmath>
#include <stdexcept>

/// Constructs a fully connected layer.
/// @param layerIndex      position of this layer in the network (used for wiring checks)
/// @param layerType       layer-type tag forwarded to the Layer base
/// @param inputSize       number of input neurons  (must be > 0)
/// @param outputSize      number of output neurons (must be > 0)
/// @param activationName  one of "sigmoid", "relu", "tanh" (case-insensitive)
/// @param lossName        loss function name forwarded to LossFactory
/// @throws invalid_argument on non-positive sizes or unknown activation
/// @throws runtime_error   if tensor allocation fails (user-defined tensor build)
FullConnectedLayer::FullConnectedLayer(int layerIndex, const LayerType& layerType, int inputSize, int outputSize, const string& activationName, const string& lossName)
    : Layer(layerIndex, layerType, activationName, lossName), inputSize(inputSize), outputSize(outputSize) {
    if (inputSize <= 0 || outputSize <= 0) {
        throw invalid_argument("Input size and output size must be positive.");
    }

    // Parameters, gradients, per-step deltas and previous-batch momentum
    // buffers. Weight matrices are (outputSize x inputSize).
    weights = Tensor<double, 2>(outputSize, inputSize);
    biases = Tensor<double, 1>(outputSize);
    inputsGradient = Tensor<double, 1>(inputSize);
    outputsGradient = Tensor<double, 1>(outputSize);
    weightsGradient = Tensor<double, 2>(outputSize, inputSize);
    biasesGradient = Tensor<double, 1>(outputSize);
    inputsDelta = Tensor<double, 1>(inputSize);
    outputsDelta = Tensor<double, 1>(outputSize);
    weightsDelta = Tensor<double, 2>(outputSize, inputSize);
    biasesDelta = Tensor<double, 1>(outputSize);
    prevBatchInputsDelta = Tensor<double, 1>(inputSize);
    prevBatchOutputsDelta = Tensor<double, 1>(outputSize);
    prevBatchWeightsDelta = Tensor<double, 2>(outputSize, inputSize);
    prevBatchBiasesDelta = Tensor<double, 1>(outputSize);
    #ifdef USE_USER_DEFINED_TENSOR 
        if (weights.getSize() == 0 || biases.getSize() == 0) {
            throw runtime_error("Memory allocation failed for weights or biases.");
        }
    #endif
    #ifdef USE_EIGEN_TENSOR 
        if (weights.size() == 0 || biases.size() == 0) {
            throw runtime_error("Memory allocation failed for weights or biases.");
        }
    #endif
    // Normalize the activation name once so validation and the
    // initialization-scale selection below agree on casing.
    string lowerActivationName = activationName;
    transform(lowerActivationName.begin(), lowerActivationName.end(), lowerActivationName.begin(), ::tolower);
    if (lowerActivationName != "sigmoid" && lowerActivationName != "relu" && lowerActivationName != "tanh") {
        throw invalid_argument("Invalid activation function name");
    }
    // BUG FIX: the scale selection previously compared the original-case
    // activationName, so e.g. "ReLU" passed validation above but silently
    // fell through with scale = 1.0. Compare the lower-cased name instead.
    double scale = 1.0;
    if (lowerActivationName == "sigmoid" || lowerActivationName == "tanh") {
        scale = sqrt(2.0 / (inputSize + outputSize)); // Xavier/Glorot initialization
    }
    else { // "relu" — the only remaining value after validation above
        scale = sqrt(2.0 / inputSize); // He initialization
    }
    weights.setRandom();
    weights = weights * scale;
    biases.setRandom();
    inputsGradient.setZero();
    outputsGradient.setZero();
    weightsGradient.setZero();
    biasesGradient.setZero();
    inputsDelta.setZero();
    outputsDelta.setZero();
    weightsDelta.setZero();
    biasesDelta.setZero();
    prevBatchInputsDelta.setZero();
    prevBatchOutputsDelta.setZero();
    prevBatchWeightsDelta.setZero();
    prevBatchBiasesDelta.setZero();
    activation = ActivationFactory<double, 1>::create(activationName);
    loss = LossFactory::create(lossName);
}
FullConnectedLayer::~FullConnectedLayer() {}
/// Wires the previous layer and caches a downcast pointer to it.
/// Only pooling and fully connected layers may precede a FC layer.
/// @throws invalid_argument if prevLayer is null or of an unsupported type
void FullConnectedLayer::setPrevLayer(const shared_ptr<Layer>& prevLayer) {
    // ROBUSTNESS: guard against a null argument before dereferencing it.
    if (prevLayer == nullptr) {
        throw invalid_argument("Previous layer of full connected layer " + to_string(layerIndex) + " must not be null.");
    }
    this->prevLayer = prevLayer;
    if (prevLayer->getLayerType() == LayerType::POOLING) {
        this->prevPoolingLayer = dynamic_pointer_cast<PoolingLayer>(prevLayer);
    } else if (prevLayer->getLayerType() == LayerType::FC) {
        this->prevFcLayer = dynamic_pointer_cast<FullConnectedLayer>(prevLayer);
    } else {
        throw invalid_argument("Invalid layer type when setting previous layer of full connected layer " + to_string(layerIndex) + " : " + to_string(prevLayer->getLayerType()));
    }
}
/// Wires the next layer and caches a downcast pointer to it.
/// Only a fully connected layer may follow a FC layer.
/// @throws invalid_argument if nextLayer is null or of an unsupported type
void FullConnectedLayer::setNextLayer(const shared_ptr<Layer>& nextLayer) {
    // ROBUSTNESS: guard against a null argument before dereferencing it.
    if (nextLayer == nullptr) {
        throw invalid_argument("Next layer of full connected layer " + to_string(layerIndex) + " must not be null.");
    }
    this->nextLayer = nextLayer;
    if (nextLayer->getLayerType() == LayerType::FC) {
        this->nextFcLayer = dynamic_pointer_cast<FullConnectedLayer>(nextLayer);
    } else {
        // BUG FIX: the error message previously reported
        // prevLayer->getLayerType() (a copy-paste from setPrevLayer), which
        // both described the wrong layer and dereferenced a possibly-null
        // prevLayer. Report the offending nextLayer's type instead.
        throw invalid_argument("Invalid layer type when setting next layer of full connected layer " + to_string(layerIndex) + " : " + to_string(nextLayer->getLayerType()));
    }
}
const int FullConnectedLayer::getInputSize() const {
    return inputSize;
}
const int FullConnectedLayer::getOutputSize() const {
    return outputSize;
}
const Tensor<double, 1>& FullConnectedLayer::getInputFeatureMap() const {
    return inputFeatureMap;
}
const Tensor<double, 1>& FullConnectedLayer::getOutputFeatureMap() const {
    return outputFeatureMap;
}
const Tensor<double, 2>& FullConnectedLayer::getWeights() const {
    return weights;
}
const Tensor<double, 1>& FullConnectedLayer::getBiases() const {
    return biases;
}
const Tensor<double, 1>& FullConnectedLayer::getInputsGradient() const {
    return inputsGradient;
}
const Tensor<double, 1>& FullConnectedLayer::getOutputsGradient() const {
    return outputsGradient;
}
const Tensor<double, 2>& FullConnectedLayer::getWeightsGradient() const {
    return weightsGradient;
}
const Tensor<double, 1>& FullConnectedLayer::getBiasesGradient() const {
    return biasesGradient;
}
const Tensor<double, 1>& FullConnectedLayer::getInputsDelta() const {
    return inputsDelta;
}
const Tensor<double, 1>& FullConnectedLayer::getOutputsDelta() const {
    return outputsDelta;
}
const Tensor<double, 2>& FullConnectedLayer::getWeightsDelta() const {
    return weightsDelta;
}
const Tensor<double, 1>& FullConnectedLayer::getBiasesDelta() const {
    return biasesDelta;
}
// --- Mutators ------------------------------------------------------------
// Trivial setters: each copies its argument into the corresponding member.

// Stores the input vector consumed by the next forward pass.
// (The input size is fixed at construction; it is not re-derived here.)
void FullConnectedLayer::setInputFeatureMap(const Tensor<double, 1>& inputFeatureMap) { this->inputFeatureMap = inputFeatureMap; }
// Overrides the cached output vector.
void FullConnectedLayer::setOutputFeatureMap(const Tensor<double, 1>& outputFeatureMap) { this->outputFeatureMap = outputFeatureMap; }
// Replaces the weight matrix (expected shape: outputSize x inputSize).
void FullConnectedLayer::setWeights(const Tensor<double, 2>& weights) { this->weights = weights; }
// Replaces the bias vector (expected length: outputSize).
void FullConnectedLayer::setBiases(const Tensor<double, 1>& biases) { this->biases = biases; }
// Replaces the inputs gradient.
void FullConnectedLayer::setInputsGradient(const Tensor<double, 1>& inputsGradient) { this->inputsGradient = inputsGradient; }
// Replaces the outputs gradient.
void FullConnectedLayer::setOutputsGradient(const Tensor<double, 1>& outputsGradient) { this->outputsGradient = outputsGradient; }
// Replaces the weights gradient.
void FullConnectedLayer::setWeightsGradient(const Tensor<double, 2>& weightsGradient) { this->weightsGradient = weightsGradient; }
// Replaces the biases gradient.
void FullConnectedLayer::setBiasesGradient(const Tensor<double, 1>& biasesGradient) { this->biasesGradient = biasesGradient; }
// Replaces the inputs delta.
void FullConnectedLayer::setInputsDelta(const Tensor<double, 1>& inputsDelta) { this->inputsDelta = inputsDelta; }
// Replaces the outputs delta.
void FullConnectedLayer::setOutputsDelta(const Tensor<double, 1>& outputsDelta) { this->outputsDelta = outputsDelta; }
// Replaces the weights delta.
void FullConnectedLayer::setWeightsDelta(const Tensor<double, 2>& weightsDelta) { this->weightsDelta = weightsDelta; }
// Replaces the biases delta.
void FullConnectedLayer::setBiasesDelta(const Tensor<double, 1>& biasesDelta) { this->biasesDelta = biasesDelta; }
// Forward pass: outputFeatureMap = activation(W * inputFeatureMap + b).
void FullConnectedLayer::calculateOutputFeatureMap() {
    // W (outputSize x inputSize) times the input column vector -> raw
    // pre-activation values, as a rank-1 array.
    auto outputFeatureMapArray = (weights.matrix() * inputFeatureMap.matrix()).array();
    // Re-wrap the raw buffer as a rank-1 tensor and add the biases.
    // NOTE(review): this assumes outputFeatureMapArray owns (or keeps alive)
    // its data buffer until the TensorMap copy completes — confirm against
    // the project Tensor/TensorMap implementation.
    outputFeatureMap = TensorMap<double, 1>(outputFeatureMapArray.getData(), outputFeatureMapArray.getSize()).tensor() + biases;
    // outputFeatureMap = weights.contract(inputFeatureMap, Eigen::array<Eigen::IndexPair<int>, 1>{{{1, 0}}}) + biases;
    // Apply the activation function elementwise.
    outputFeatureMap = activation->activateToTensor(outputFeatureMap);
}
// Backprop through the linear map: inputsGradient = W^T * outputsGradient.
// NOTE(review): the inputFeatureMap parameter is unused here — it appears
// only to keep the signature parallel with calculateWeightsGradient.
void FullConnectedLayer::calculateInputsGradient(const Tensor<double, 1>& inputFeatureMap, const Tensor<double, 1>& outputsGradient) {
    // W^T (inputSize x outputSize) times the output gradient vector.
    auto result = (weights.matrix().transpose() * outputsGradient.matrix()).array();
    inputsGradient = TensorMap<double, 1>(result.getData(), result.getSize()).tensor();
}
// Output-layer gradient: dL/dz = dL/da (elementwise*) da/dz, where a is the
// activated output and z the pre-activation.
void FullConnectedLayer::calculateOutputsGradient(const Tensor<double, 1>& targetFeatureMap) {
    // Loss derivative w.r.t. the activated outputs.
    Tensor<double, 1> derivativeLoss = loss->derivativeToTensor(outputFeatureMap, targetFeatureMap);
    // Activation derivative, evaluated on the activated output
    // (the project's derivativeToTensor presumably expects activated
    // values, e.g. sigmoid' = a * (1 - a) — confirm against ActivationFunction).
    Tensor<double, 1> derivativeActivation = activation->derivativeToTensor(outputFeatureMap);
    outputsGradient = derivativeLoss.cwiseProduct(derivativeActivation);
}
// Weight gradient as the outer product of the output gradient and the
// input: dL/dW = outputsGradient * inputFeatureMap^T, shape
// (outputSize x inputSize).
void FullConnectedLayer::calculateWeightsGradient(Tensor<double, 1>& inputFeatureMap, const Tensor<double, 1>& outputsGradient) {
    // FIX: removed a redundant `weightsGradient = Tensor<double, 2>(...)`
    // reallocation here — the member is fully overwritten by the
    // assignment below.
    auto weightsGradientArray = (outputsGradient.matrix().outerProduct(inputFeatureMap.matrix().transpose())).array();
    weightsGradient = TensorMap<double, 2>(weightsGradientArray.getData(), weightsGradientArray.getSize()).tensor();
}
void FullConnectedLayer::calculateBiasesGradient(const Tensor<double, 1>& outputsGradient) {
    // biasesGradient = outputsGradient.sum(Eigen::array<int, 1>({0}));
    // biasesGradient = outputsGradient.sum();
    double nextLayerOutputsGradient = nextFcLayer->getOutputsGradient().sum();
    biasesGradient = nextLayerOutputsGradient;
}
// Hidden-layer gradient: delta = f'(out) (elementwise*) (W_next^T * delta_next).
// @param outputFeatureMap this layer's activated output
// @throws runtime_error if the next layer is missing or mis-wired
void FullConnectedLayer::calculateHiddenLayerOutputsGradient(Tensor<double, 1>& outputFeatureMap) {
    if (nextFcLayer == nullptr) {
        throw runtime_error("Next layer is null.");
    }
    if (nextFcLayer->getLayerIndex() != layerIndex + 1) {
        throw runtime_error("Next layer index is not correct.");
    }
    if (nextFcLayer->getInputSize() != outputSize) {
        throw runtime_error("Next layer input size is not correct.");
    }
    // BUG FIX: this previously multiplied the activation derivative by the
    // *scalar sum* of the next layer's output gradient, which discards the
    // per-connection weighting required by backpropagation. The correct
    // term W_next^T * delta_next is exactly what the next layer computes as
    // its inputsGradient (see calculateInputsGradient), and it has length
    // outputSize by the wiring checks above.
    // NOTE(review): assumes backward() is invoked from the output layer
    // toward the input, so nextFcLayer->getInputsGradient() is already
    // up to date — confirm against LayerManager's backward ordering.
    outputsGradient = activation->derivativeToTensor(outputFeatureMap).cwiseProduct(nextFcLayer->getInputsGradient());
}
// Output-layer entry point: delegates to the loss-based gradient
// computation (kept separate so backward() reads symmetrically for
// hidden vs. output layers).
void FullConnectedLayer::calculateOutputLayerOutputsGradient(const Tensor<double, 1>& targetFeatureMap) {
    calculateOutputsGradient(targetFeatureMap);
}
// --- Momentum update steps ------------------------------------------------
// Each method computes delta_t = momentum * delta_{t-1} + learningRate * g
// and remembers it as the "previous batch" delta for the next iteration.
// NOTE(review): the delta carries +learningRate * gradient and is *added*
// in updateWeights/updateBiases — descent therefore relies on the loss
// derivative's sign convention; verify against LossFunction.

// Momentum-smoothed step for the inputs gradient.
void FullConnectedLayer::calculateInputsDelta(double learningRate, double momentum) {
    inputsDelta = prevBatchInputsDelta * momentum + inputsGradient * learningRate;
    prevBatchInputsDelta = inputsDelta;
}
// Momentum-smoothed step for the outputs gradient.
void FullConnectedLayer::calculateOutputsDelta(double learningRate, double momentum) {
    outputsDelta = prevBatchOutputsDelta * momentum + outputsGradient * learningRate;
    prevBatchOutputsDelta = outputsDelta;
}
// Momentum-smoothed step for the weights gradient.
void FullConnectedLayer::calculateWeightsDelta(double learningRate, double momentum) {
    weightsDelta = prevBatchWeightsDelta * momentum + weightsGradient * learningRate;
    prevBatchWeightsDelta = weightsDelta;
}
// Momentum-smoothed step for the biases gradient.
void FullConnectedLayer::calculateBiasesDelta(double learningRate, double momentum) {
    biasesDelta = prevBatchBiasesDelta * momentum + biasesGradient * learningRate;
    prevBatchBiasesDelta = biasesDelta;
}
void FullConnectedLayer::forward(const Tensor<double, 1>& inputFeatureMap) {
    setInputFeatureMap(inputFeatureMap);
    calculateOutputFeatureMap();
}
// Backward pass for this layer: validates the wiring to the neighbouring
// layers, computes the output gradient (from the loss for the output
// layer, or from the next layer for hidden layers), then derives the
// input, weight and bias gradients.
// @param targetFeatureMap target vector; only consumed when this is the
//        output layer (nextLayer == nullptr).
void FullConnectedLayer::backward(const Tensor<double, 1>& targetFeatureMap) {
    // The first layer has no predecessor and nothing to backpropagate into.
    if (prevLayer == nullptr) {
        return;
    }
    // Sanity-check the wiring set up via setPrevLayer/setNextLayer.
    if (prevFcLayer != nullptr) {
        if (prevFcLayer->getLayerIndex() != layerIndex - 1) {
            throw runtime_error("Previous full connected layer index is not correct.");
        }
        if (prevFcLayer->getOutputSize() != inputSize) {
            throw runtime_error("Previous full connected layer output size is not correct.");
        }
    }
    if (prevPoolingLayer != nullptr) {
        if (prevPoolingLayer->getLayerIndex() != layerIndex - 1) {
            throw runtime_error("Previous pooling layer index is not correct.");
        }
        if (prevPoolingLayer->getOutputFeatureMapForFc().dimension(0) != inputSize) {
            throw runtime_error("Previous pooling layer output size is not correct.");
        }
    }
    if (nextFcLayer != nullptr) {
        if (nextFcLayer->getLayerIndex() != layerIndex + 1) {
            throw runtime_error("Next full connected layer index is not correct.");
        }
        if (nextFcLayer->getInputSize() != outputSize) {
            throw runtime_error("Next full connected layer input size is not correct.");
        }
    }
    // Output layer: gradient comes from the loss; hidden layer: from the
    // next layer's gradient.
    if (nextLayer == nullptr) {
        calculateOutputLayerOutputsGradient(targetFeatureMap);
    }
    else {
        calculateHiddenLayerOutputsGradient(outputFeatureMap);
    }
    // Propagate: inputs gradient (for the previous layer), then the
    // parameter gradients used by the update step.
    calculateInputsGradient(inputFeatureMap, outputsGradient);
    calculateWeightsGradient(inputFeatureMap, outputsGradient);
    // NOTE(review): calculateBiasesGradient as written reads
    // nextFcLayer->getOutputsGradient(), which is null when this is the
    // output layer — verify this path before relying on it.
    calculateBiasesGradient(outputsGradient);
}
// Applies the precomputed weight delta. The learningRate/momentum
// parameters are unused here: both are already folded into weightsDelta
// by calculateWeightsDelta, which must run first.
void FullConnectedLayer::updateWeights(double learningRate, double momentum) {
    // weights = weights - learningRate * weightsGradient - weightsGradient * momentum;
    weights += weightsDelta;
}
// Applies the precomputed bias delta. As above, learningRate/momentum are
// already folded into biasesDelta by calculateBiasesDelta.
void FullConnectedLayer::updateBiases(double learningRate, double momentum) {
    // biases = biases - biasesGradient * learningRate - biasesGradient * momentum;
    biases += biasesDelta;
}
namespace {
// Prints a labelled tensor's elements on one space-separated line,
// exactly mirroring the original inline loops ("<label>\n<v0> <v1> ...\n").
// Works for both rank-1 and rank-2 tensors via linear indexing.
template <typename TensorT>
void printLabelledTensor(const char* label, const TensorT& t) {
    cout << label << endl;
    for (int i = 0; i < t.getSize(); i++) {
        cout << t(i) << " ";
    }
    cout << endl;
}
}

// Dumps the layer's full state (dimensions, parameters, gradients, deltas)
// to stdout for debugging.
// REFACTOR: the twelve hand-rolled print loops were collapsed into the
// printLabelledTensor helper above; the output is byte-identical.
void FullConnectedLayer::print() const {
    cout << "FullConnectedLayer: " << layerIndex << endl;
    cout << "Layer Type: " << to_string(layerType) << endl;
    cout << "Input size: " << inputSize << endl;
    cout << "Output size: " << outputSize << endl;
    printLabelledTensor("Input feature map: ", inputFeatureMap);
    printLabelledTensor("Output feature map: ", outputFeatureMap);
    printLabelledTensor("Weights: ", weights);
    printLabelledTensor("Biases: ", biases);
    printLabelledTensor("Inputs gradient: ", inputsGradient);
    printLabelledTensor("Outputs gradient: ", outputsGradient);
    printLabelledTensor("Weights gradient: ", weightsGradient);
    printLabelledTensor("Biases gradient: ", biasesGradient);
    printLabelledTensor("Inputs delta: ", inputsDelta);
    printLabelledTensor("Outputs delta: ", outputsDelta);
    printLabelledTensor("Weights delta: ", weightsDelta);
    printLabelledTensor("Biases delta: ", biasesDelta);
    cout << "Activation: " << ActivationFactory<double, 1>::getName(activation.get()) << endl;
    cout << "Loss: " << LossFactory::getName(loss.get()) << endl;
}