/*
Fully connected layer header.
Declares the network's fully connected (dense) operation and weight management.
Supports fully connected layers with arbitrary input/output dimensions.
*/
#ifndef FULL_CONNECTED_LAYER_H
#define FULL_CONNECTED_LAYER_H

#include "Macros.h"
#ifdef USE_USER_DEFINED_TENSOR 
    #undef USE_EIGEN_TENSOR
    #include "Tensor.h"
#endif
#ifdef USE_EIGEN_TENSOR
    #undef USE_USER_DEFINED_TENSOR
    #include <Eigen/Core>
    #include <Eigen/Dense>
#endif
#include "Layer.h"
#include "ActivationFunction.h"
#include "LossFunction.h"
#include <string>
#include <vector>
#include <memory>

#ifdef USE_EIGEN_TENSOR
// Enable Eigen parallelization.
// NOTE(review): Eigen configuration macros normally must be defined BEFORE the
// Eigen headers are included, but <Eigen/Core>/<Eigen/Dense> are already
// included above — so this #define likely has no effect in this translation
// unit. Verify, and move it ahead of the includes if threading is intended.
#define EIGEN_USE_THREADS
#endif

using namespace std;
#ifdef USE_EIGEN_TENSOR
using namespace Eigen;
#endif
#ifdef USE_USER_DEFINED_TENSOR
using namespace UserDefinedTensor;
#endif

class PoolingLayer;

/*
Fully connected (dense) layer of the network.

Owns the weight/bias parameters together with the gradient buffers and the
"delta" (update-step) buffers used during backpropagation with momentum.
This header only declares the interface; all definitions live in the
matching .cpp file.

NOTE(review): prevFcLayer and nextFcLayer are both shared_ptr, so wiring two
layers to each other in both directions would form a shared_ptr ownership
cycle that is never freed; one direction should probably be weak_ptr —
confirm against how the network links its layers.
*/
class FullConnectedLayer : public Layer{
public:
    // layerIndex / layerType identify this layer within the network.
    // inputSize / outputSize fix the layer dimensions (and hence the weight
    // matrix shape). activationName selects the activation function by name
    // (default "relu"); lossName selects the loss function by name
    // (default "mse").
    FullConnectedLayer(int layerIndex, const LayerType& layerType, int inputSize, int outputSize, const string& activationName = "relu", const string& lossName = "mse");
    ~FullConnectedLayer();

    // Wire up neighbouring layers (overrides of the Layer base interface).
    void setPrevLayer(const shared_ptr<Layer>& prevLayer) override;
    void setNextLayer(const shared_ptr<Layer>& nextLayer) override;

    // --- Getters ---------------------------------------------------------
    // NOTE(review): the top-level const on these by-value int returns has no
    // effect (clang-tidy: readability-const-return-type). Left unchanged here
    // because the out-of-view .cpp definitions must keep matching these
    // declarations.
    const int getInputSize() const;
    const int getOutputSize() const;
    // Activations cached by the most recent forward pass.
    const Tensor<double, 1>& getInputFeatureMap() const;
    const Tensor<double, 1>& getOutputFeatureMap() const;
    // Learnable parameters.
    const Tensor<double, 2>& getWeights() const;
    const Tensor<double, 1>& getBiases() const;
    // Gradients of the loss w.r.t. inputs / outputs / weights / biases.
    const Tensor<double, 1>& getInputsGradient() const;
    const Tensor<double, 1>& getOutputsGradient() const;
    const Tensor<double, 2>& getWeightsGradient() const;
    const Tensor<double, 1>& getBiasesGradient() const;
    // Update-step ("delta") buffers produced by the calculate*Delta methods.
    const Tensor<double, 1>& getInputsDelta() const;
    const Tensor<double, 1>& getOutputsDelta() const;
    const Tensor<double, 2>& getWeightsDelta() const;
    const Tensor<double, 1>& getBiasesDelta() const;

    // --- Setters ---------------------------------------------------------
    void setInputSize(int inputSize);
    void setInputFeatureMap(const Tensor<double, 1>& inputFeatureMap);
    void setOutputFeatureMap(const Tensor<double, 1>& outputFeatureMap);
    void setWeights(const Tensor<double, 2>& weights);
    void setBiases(const Tensor<double, 1>& biases);
    void setInputsGradient(const Tensor<double, 1>& inputsGradient);
    void setOutputsGradient(const Tensor<double, 1>& outputsGradient);
    void setWeightsGradient(const Tensor<double, 2>& weightsGradient);
    void setBiasesGradient(const Tensor<double, 1>& biasesGradient);
    void setInputsDelta(const Tensor<double, 1>& inputsDelta);
    void setOutputsDelta(const Tensor<double, 1>& outputsDelta);
    void setWeightsDelta(const Tensor<double, 2>& weightsDelta);
    void setBiasesDelta(const Tensor<double, 1>& biasesDelta);

    // --- Computation steps -----------------------------------------------
    // Computes outputFeatureMap from the stored input, weights and biases.
    void calculateOutputFeatureMap();
    // Per-quantity gradient helpers used by backward(); each takes the
    // tensors it reads explicitly and stores its result in the matching
    // member buffer (definitions in the .cpp).
    void calculateInputsGradient(const Tensor<double, 1>& inputFeatureMap, const Tensor<double, 1>& outputsGradient);
    void calculateOutputsGradient(const Tensor<double, 1>& targetFeatureMap);
    // NOTE(review): inputFeatureMap is taken by non-const reference here but
    // by const reference in calculateInputsGradient — likely an oversight;
    // confirm against the definition before tightening to const&.
    void calculateWeightsGradient(Tensor<double, 1>& inputFeatureMap, const Tensor<double, 1>& outputsGradient);
    void calculateBiasesGradient(const Tensor<double, 1>& outputsGradient);
    // Output-side gradient: hidden layers back-propagate from the next
    // layer, while the output layer differentiates the loss against the
    // target. (outputFeatureMap is non-const here — presumably mutated in
    // place; verify in the .cpp.)
    void calculateHiddenLayerOutputsGradient(Tensor<double, 1>& outputFeatureMap);
    void calculateOutputLayerOutputsGradient(const Tensor<double, 1>& targetFeatureMap);
    // Turn gradients into update steps; the prevBatch*Delta members suggest
    // the previous batch's step is blended in via the momentum factor.
    void calculateInputsDelta(double learningRate, double momentum);
    void calculateOutputsDelta(double learningRate, double momentum);
    void calculateWeightsDelta(double learningRate, double momentum);
    void calculateBiasesDelta(double learningRate, double momentum);
    // Full forward / backward passes over this layer (definitions in .cpp).
    void forward(const Tensor<double, 1>& inputFeatureMap);
    void backward(const Tensor<double, 1>& targetFeatureMap);
    // Apply the computed deltas to the parameters.
    void updateWeights(double learningRate, double momentum);
    void updateBiases(double learningRate, double momentum);
    // Debug dump of the layer state.
    void print() const;
    
private:
    // Layer dimensions.
    int inputSize;
    int outputSize;
    // Activations cached by the most recent forward pass.
    Tensor<double, 1> inputFeatureMap;
    Tensor<double, 1> outputFeatureMap;
    // Learnable parameters (weight matrix shape is fixed by
    // inputSize/outputSize — exact orientation is defined in the .cpp).
    Tensor<double, 2> weights;
    Tensor<double, 1> biases;
    // Gradients computed during backward().
    Tensor<double, 1> inputsGradient;
    Tensor<double, 1> outputsGradient;
    Tensor<double, 2> weightsGradient;
    Tensor<double, 1> biasesGradient;
    // Update steps for the current batch.
    Tensor<double, 1> inputsDelta;
    Tensor<double, 1> outputsDelta;
    Tensor<double, 2> weightsDelta;
    Tensor<double, 1> biasesDelta;
    // Previous batch's steps, kept for the momentum term.
    Tensor<double, 1> prevBatchInputsDelta;
    Tensor<double, 1> prevBatchOutputsDelta;
    Tensor<double, 2> prevBatchWeightsDelta;
    Tensor<double, 1> prevBatchBiasesDelta;
    // Typed links to neighbouring layers (see setPrevLayer/setNextLayer and
    // the ownership-cycle note on the class comment above).
    shared_ptr<FullConnectedLayer> nextFcLayer;
    shared_ptr<FullConnectedLayer> prevFcLayer;
    shared_ptr<PoolingLayer> prevPoolingLayer;
    // Pluggable activation and loss, selected by name in the constructor.
    unique_ptr<ActivationFunction<double, 1> > activation;
    unique_ptr<LossFunction> loss;
};


#endif // FULL_CONNECTED_LAYER_H
