/*
 * Convolutional layer implementation.
 * Implements the convolution operation, forward propagation and backward propagation.
 * Supports multiple filter configurations and parameter updates.
 */
#include "ConvLayer.h"
#include "Filter.h"
#include "ActivationFunction.h"
#include "LossFunction.h"
#include <iostream>
#include <stdexcept>

using namespace std;

void zeroPadding(const Tensor<double, 3>& inputFeatureMap, Tensor<double, 3>& outputFeatureMap, int padding) {
    // Surround the input feature map with a border of zeros `padding` cells
    // wide on every side; the channel count is unchanged.
    const int srcWidth = inputFeatureMap.dimension(0);
    const int srcHeight = inputFeatureMap.dimension(1);
    const int channels = inputFeatureMap.dimension(2);

    outputFeatureMap = Tensor<double, 3>(srcWidth + 2 * padding, srcHeight + 2 * padding, channels);
    outputFeatureMap.setZero();

    // Copy the source into the centre of the zero-filled destination.
    for (int ch = 0; ch < channels; ++ch) {
        for (int row = 0; row < srcHeight; ++row) {
            for (int col = 0; col < srcWidth; ++col) {
                outputFeatureMap(col + padding, row + padding, ch) = inputFeatureMap(col, row, ch);
            }
        }
    }
}
// Plain (valid, stride-able) 2-D convolution of a single Filter over a 3-D
// feature map. The input is assumed to be already padded by the caller.
// Output size follows the standard formula (in - filter) / stride + 1
// (integer division: a trailing partial window is dropped).
void conv2d(const Tensor<double, 3>& inputFeatureMap, const Filter& filter, Tensor<double, 3>& outputFeatureMap, int widthStride, int heightStride) {
    int inputWidth = inputFeatureMap.dimension(0);
    int inputHeight = inputFeatureMap.dimension(1);
    int inputChannels = inputFeatureMap.dimension(2);
    int filterWidth = filter.getFilterWidth();
    int filterHeight = filter.getFilterHeight();
    int outputWidth = (inputWidth - filterWidth) / widthStride + 1;
    int outputHeight = (inputHeight - filterHeight) / heightStride + 1;
    // NOTE(review): the output channel count is taken from the filter's INPUT
    // channel count. Because the weight sum below does not depend on `c`,
    // every output channel receives the identical cross-channel sum and the
    // channels differ only by bias(c). A single filter conventionally yields
    // one output channel; callers in this file only ever read channel 0 —
    // confirm this layout against Filter's definition.
    int outputChannels = filter.getInputChannels();
    
    outputFeatureMap = Tensor<double, 3>(outputWidth, outputHeight, outputChannels);
    outputFeatureMap.setZero();
    Tensor<double, 3> weights = filter.getWeights();
    Tensor<double, 1> biases = filter.getBiases();
    
    for (int c = 0; c < outputChannels; ++c) {
        for (int h = 0; h < outputHeight; ++h) {
            for (int w = 0; w < outputWidth; ++w) {
                // Correlate the filter window anchored at (w*stride, h*stride)
                // with all input channels.
                for (int i = 0; i < filterWidth; ++i) {
                    for (int j = 0; j < filterHeight; ++j) {
                        for (int k = 0; k < inputChannels; ++k) {
                            outputFeatureMap(w, h, c) += inputFeatureMap(w * widthStride + i, h * heightStride + j, k) * weights(i, j, k);
                        }
                    }
                }
                // Bias is added here, once per output element.
                // NOTE(review): indexing the bias tensor by output channel
                // assumes it has at least `outputChannels` entries — verify.
                outputFeatureMap(w, h, c) += biases(c);
            }
        }
    }
}
// Convenience wrapper: zero-pad the input, then run the plain convolution.
void convolve(const Tensor<double, 3>& inputFeatureMap, const Filter& filter, Tensor<double, 3>& outputFeatureMap, int widthStride, int heightStride, int padding) {
    if (padding == 0) {
        // No border to add — convolve directly and skip the full-tensor copy
        // that zeroPadding would otherwise perform.
        conv2d(inputFeatureMap, filter, outputFeatureMap, widthStride, heightStride);
        return;
    }
    Tensor<double, 3> paddedInput;
    zeroPadding(inputFeatureMap, paddedInput, padding);
    conv2d(paddedInput, filter, outputFeatureMap, widthStride, heightStride);
}

// Construct a convolutional layer, validate its geometry, derive the output
// dimensions, and pre-allocate every gradient / delta / momentum tensor.
// Throws invalid_argument on non-positive filter/stride sizes, negative
// padding, or empty activation/loss names.
ConvLayer::ConvLayer(int layerIndex, const LayerType& layerType, int inputWidth, int inputHeight, int inputChannels, int numFilters, int filterWidth, int filterHeight, int widthStride, int heightStride, int padding, const string& activationName, const string& lossName)
    : Layer(layerIndex, layerType, activationName, lossName), inputWidth(inputWidth), inputHeight(inputHeight), inputChannels(inputChannels), numFilters(numFilters), filterWidth(filterWidth), filterHeight(filterHeight), widthStride(widthStride), heightStride(heightStride), padding(padding) {
    if (filterWidth <= 0 || filterHeight <= 0 || widthStride <= 0 || heightStride <= 0 || padding < 0) {
        throw invalid_argument("Invalid filter or stride parameters.");
    }
    if (activationName.empty() || lossName.empty()) {
        throw invalid_argument("Activation and loss function names cannot be empty.");
    }
    // Calculate the output dimensions of the convolutional layer
    calculateOutputDimensions(inputWidth, inputHeight, filterWidth, filterHeight, widthStride, heightStride, padding);
    outputFeatureMap = Tensor<double, 3>(outputWidth, outputHeight, outputChannels);
    // BUGFIX: inputsGradient holds dL/dInput, so it must match the *input*
    // geometry. It was previously sized with the output dimensions, so
    // calculateInputsGradient() wrote out of bounds whenever the input was
    // larger than the output (the usual case).
    inputsGradient = Tensor<double, 3>(inputWidth, inputHeight, inputChannels);
    outputsGradient = Tensor<double, 3>(outputWidth, outputHeight, outputChannels);
    // Per-filter weight gradients are packed along the channel axis as
    // c + filterIndex * inputChannels.
    weightsGradient = Tensor<double, 3>(filterWidth, filterHeight, inputChannels * numFilters);
    biasesGradient = Tensor<double, 1>(numFilters);
    filters.resize(numFilters);
    for (int i = 0; i < numFilters; ++i) {
        filters[i] = make_shared<Filter>(i, filterWidth, filterHeight, inputChannels);
    }
    // BUGFIX: the delta tensors were never allocated here even though the
    // calculate*Delta() methods write into them element by element; size and
    // zero them alongside their momentum (prevBatch*) companions.
    inputsDelta = Tensor<double, 3>(inputWidth, inputHeight, inputChannels);
    inputsDelta.setZero();
    outputsDelta = Tensor<double, 3>(outputWidth, outputHeight, outputChannels);
    outputsDelta.setZero();
    weightsDelta = Tensor<double, 3>(filterWidth, filterHeight, inputChannels * numFilters);
    weightsDelta.setZero();
    biasesDelta = Tensor<double, 1>(numFilters);
    biasesDelta.setZero();
    prevBatchInputsDelta = Tensor<double, 3>(inputWidth, inputHeight, inputChannels);
    prevBatchInputsDelta.setZero();
    prevBatchOutputsDelta = Tensor<double, 3>(outputWidth, outputHeight, outputChannels);
    prevBatchOutputsDelta.setZero();
    prevBatchWeightsDelta = Tensor<double, 3>(filterWidth, filterHeight, inputChannels * numFilters);
    prevBatchWeightsDelta.setZero();
    prevBatchBiasesDelta = Tensor<double, 1>(numFilters);
    prevBatchBiasesDelta.setZero();
    this->activation = ActivationFactory<double, 3>::create(activationName);
    this->loss = LossFactory::create(lossName);
}
ConvLayer::~ConvLayer() {}
// Record the upstream layer and cache a typed handle to it. A conv layer may
// only follow another conv layer or a pooling layer; anything else throws.
void ConvLayer::setPrevLayer(const shared_ptr<Layer>& prevLayer) {
    this->prevLayer = prevLayer;
    switch (prevLayer->getLayerType()) {
        case LayerType::CONV:
            this->prevConvLayer = dynamic_pointer_cast<ConvLayer>(prevLayer);
            break;
        case LayerType::POOLING:
            this->prevPoolingLayer = dynamic_pointer_cast<PoolingLayer>(prevLayer);
            break;
        default:
            throw invalid_argument("Invalid layer type when setting previous layer of convolutional layer " + to_string(layerIndex) + " : " + to_string(prevLayer->getLayerType()));
    }
}
// Record the downstream layer and cache a typed handle to it. A conv layer
// may only feed a pooling layer or a fully-connected layer; anything else
// throws invalid_argument.
void ConvLayer::setNextLayer(const shared_ptr<Layer>& nextLayer) {
    this->nextLayer = nextLayer;
    if (nextLayer->getLayerType() == LayerType::POOLING) {
        this->nextPoolingLayer = dynamic_pointer_cast<PoolingLayer>(nextLayer);
    } else if (nextLayer->getLayerType() == LayerType::FC) {
        this->nextFcLayer = dynamic_pointer_cast<FullConnectedLayer>(nextLayer);
    } else {
        // BUGFIX: the diagnostic previously reported prevLayer's type (wrong
        // object, and prevLayer may still be null here, which would crash
        // while building the error message). Report the offending NEXT layer.
        throw invalid_argument("Invalid layer type when setting next layer of convolutional layer " + to_string(layerIndex) + " : " + to_string(nextLayer->getLayerType()));
    }
}
// ---------------------------------------------------------------------------
// Accessors: trivial pass-throughs to the layer's stored configuration,
// cached feature maps, gradients, and deltas.
// ---------------------------------------------------------------------------
int ConvLayer::getInputWidth() const {
    return inputWidth;
}
int ConvLayer::getInputHeight() const {
    return inputHeight;
}
int ConvLayer::getInputChannels() const {
    return inputChannels;
}
const Tensor<double, 3>& ConvLayer::getInputFeatureMap() const {
    return inputFeatureMap;
}
int ConvLayer::getNumFilters() const {
    return numFilters;
}
const vector<shared_ptr<Filter> >& ConvLayer::getFilters() const {
    return filters;
}
int ConvLayer::getFilterWidth() const {
    return filterWidth;
}
int ConvLayer::getFilterHeight() const {
    return filterHeight;
}
int ConvLayer::getWidthStride() const {
    return widthStride;
}
int ConvLayer::getHeightStride() const {
    return heightStride;
}
int ConvLayer::getPadding() const {
    return padding;
}
int ConvLayer::getOutputWidth() const {
    return outputWidth;
}
int ConvLayer::getOutputHeight() const {
    return outputHeight;
}
int ConvLayer::getOutputChannels() const {
    return outputChannels;
}
// The *ForPooling / *ForFc dimensions mirror the output dimensions (see
// calculateOutputDimensions) and describe the view handed to the next layer.
int ConvLayer::getOutputWidthForPooling() const {
    return outputWidthForPooling;
}
int ConvLayer::getOutputHeightForPooling() const {
    return outputHeightForPooling;
}
int ConvLayer::getOutputChannelsForPooling() const {
    return outputChannelsForPooling;
}
int ConvLayer::getOutputSizeForFc() const {
    return outputSizeForFc;
}
const Tensor<double, 3>& ConvLayer::getOutputFeatureMap() const {
    return outputFeatureMap;
}
const Tensor<double, 3>& ConvLayer::getOutputFeatureMapForPooling() const {
    return outputFeatureMapForPooling;
}
const Tensor<double, 1>& ConvLayer::getOutputFeatureMapForFc() const {
    return outputFeatureMapForFc;
}
// NOTE(review): the gradient/delta getters below return `const Tensor` BY
// VALUE, copying the whole tensor on every call. Returning `const Tensor&`
// (like the feature-map getters above) would avoid the copy, but requires a
// matching signature change in the header — flagged, not changed here.
const Tensor<double, 3> ConvLayer::getInputsGradient() const {
    return inputsGradient;
}
const Tensor<double, 3> ConvLayer::getOutputsGradient() const {
    return outputsGradient;
}
const Tensor<double, 3> ConvLayer::getWeightsGradient() const {
    return weightsGradient;
}
const Tensor<double, 1> ConvLayer::getBiasesGradient() const {
    return biasesGradient;
}
const Tensor<double, 3> ConvLayer::getInputsDelta() const {
    return inputsDelta;
}
const Tensor<double, 3> ConvLayer::getOutputsDelta() const {
    return outputsDelta;
}
const Tensor<double, 3> ConvLayer::getWeightsDelta() const {
    return weightsDelta;
}
const Tensor<double, 1> ConvLayer::getBiasesDelta() const {
    return biasesDelta;
}
// ---------------------------------------------------------------------------
// Mutators: trivial pass-throughs. NOTE(review): none of these recompute the
// cached output dimensions (outputWidth/Height/Channels and the pooling/FC
// views); after changing geometry the caller is responsible for keeping the
// layer consistent — confirm this is intended.
// ---------------------------------------------------------------------------
void ConvLayer::setInputFeatureMap(const Tensor<double, 3>& inputFeatureMap) {
    this->inputFeatureMap = inputFeatureMap;
    // Deliberately NOT re-deriving the input dimensions from the tensor
    // (left disabled by the original author):
    // setInputWidth(inputFeatureMap.dimension(0));
    // setInputHeight(inputFeatureMap.dimension(1));
    // setInputChannels(inputFeatureMap.dimension(2));
}
void ConvLayer::setInputWidth(int inputWidth) {
    this->inputWidth = inputWidth;
}
void ConvLayer::setInputHeight(int inputHeight) {
    this->inputHeight = inputHeight;
}
void ConvLayer::setInputChannels(int inputChannels) {
    this->inputChannels = inputChannels;
}
void ConvLayer::setNumFilters(int numFilters) {
    this->numFilters = numFilters;
}
void ConvLayer::setFilters(const vector<shared_ptr<Filter> >& filters) {
    this->filters = filters;
}
void ConvLayer::setFilterWidth(int filterWidth) {
    this->filterWidth = filterWidth;
}
void ConvLayer::setFilterHeight(int filterHeight) {
    this->filterHeight = filterHeight;
}
void ConvLayer::setWidthStride(int widthStride) {
    this->widthStride = widthStride;
}
void ConvLayer::setHeightStride(int heightStride) {
    this->heightStride = heightStride;
}
void ConvLayer::setPadding(int padding) {
    this->padding = padding;
}
void ConvLayer::setOutputWidth(int outputWidth) {
    this->outputWidth = outputWidth;
}
void ConvLayer::setOutputHeight(int outputHeight) {
    this->outputHeight = outputHeight;
}
void ConvLayer::setOutputChannels(int outputChannels) {
    this->outputChannels = outputChannels;
}
void ConvLayer::setOutputWidthForPooling(int outputWidthForPooling) {
    this->outputWidthForPooling = outputWidthForPooling;
}
void ConvLayer::setOutputHeightForPooling(int outputHeightForPooling) {
    this->outputHeightForPooling = outputHeightForPooling;
}
void ConvLayer::setOutputChannelsForPooling(int outputChannelsForPooling) {
    this->outputChannelsForPooling = outputChannelsForPooling;
}
void ConvLayer::setOutputSizeForFc(int outputSizeForFc) {
    this->outputSizeForFc = outputSizeForFc;
}
void ConvLayer::calculateOutputDimensions(int inputWidth, int inputHeight, int filterWidth, int filterHeight, int widthStride, int heightStride, int padding) {
    this->outputWidth = (inputWidth - filterWidth + 2 * padding) / widthStride + 1;
    this->outputHeight = (inputHeight - filterHeight + 2 * padding) / heightStride + 1;
    this->outputChannels = numFilters;
    this->outputWidthForPooling = outputWidth;
    this->outputHeightForPooling = outputHeight;
    this->outputChannelsForPooling = outputChannels;
    this->outputSizeForFc = outputWidth * outputHeight * outputChannels;
}
// Run every filter over the cached input, apply the activation, and expose
// the result in the shape the next layer expects.
void ConvLayer::calculateOutputFeatureMap() {
    // BUGFIX: the accumulation below used `+=` into a tensor that was never
    // reset, so activations accumulated across successive forward passes.
    outputFeatureMap.setZero();
    for (int c = 0; c < outputChannels; ++c) {  // outputChannels = numFilters
        Tensor<double, 3> filterOutput;
        convolve(inputFeatureMap, *filters[c], filterOutput, widthStride, heightStride, padding);
        for (int h = 0; h < outputHeight; ++h) {
            for (int w = 0; w < outputWidth; ++w) {
                // BUGFIX: conv2d already adds the filter bias to every output
                // element; the original added it a second time here.
                outputFeatureMap(w, h, c) += filterOutput(w, h, 0);
            }
        }
    }
    outputFeatureMap = activation->activateToTensor(outputFeatureMap);
    // ROBUSTNESS: the original dereferenced nextLayer unconditionally and
    // crashed when this conv layer is the last one in the network.
    if (this->nextLayer == nullptr) {
        return;
    }
    if (this->nextLayer->getLayerType() == LayerType::POOLING) {
        transformOutputFeatureMapToPooling(this->outputFeatureMap);
    } else if (this->nextLayer->getLayerType() == LayerType::FC) {
        transformOutputFeatureMapToFc(this->outputFeatureMap);
    }
}
// Expose the activated output to a pooling layer. A pooling layer consumes
// the 3-D map as-is; the original reshaped the tensor to its own dimensions,
// which is an identity operation — a plain copy-assign does the same thing.
void ConvLayer::transformOutputFeatureMapToPooling(Tensor<double, 3>& outputFeatureMap) {
    this->outputFeatureMapForPooling = outputFeatureMap;
}
// Flatten the 3-D activation map into a 1-D vector for a fully-connected layer.
void ConvLayer::transformOutputFeatureMapToFc(Tensor<double, 3>& outputFeatureMap) {
    const int flattenedSize = outputFeatureMap.dimension(0)
                            * outputFeatureMap.dimension(1)
                            * outputFeatureMap.dimension(2);
    this->outputFeatureMapForFc = outputFeatureMap.reshape(flattenedSize);
}
void ConvLayer::calculateInputsGradient() {
    for (int c = 0; c < inputChannels; ++c) {
        for (int h = 0; h < inputHeight; ++h) {
            for (int w = 0; w < inputWidth; ++w) {
                inputsGradient(w, h, c) = 0.0;
            }
        }
    }
    for (int c = 0; c < inputChannels; ++c) {
        for (int oc = 0; oc < outputChannels; ++oc) {
            for (int oh = 0; oh < outputHeight; ++oh) {
                for (int ow = 0; ow < outputWidth; ++ow) {
                    for (int i = 0; i < filterWidth; ++i) {
                        for (int j = 0; j < filterHeight; ++j) {
                            int w = ow * widthStride + i;
                            int h = oh * heightStride + j;
                            if (w < inputWidth && h < inputHeight) {
                                inputsGradient(w, h, c) += filters[oc]->getWeights()(i, j, c) * outputsGradient(ow, oh, oc);
                            }
                        }
                    }
                }
            }
        }
    }
}
// Chain rule through the activation: local gradient = upstream gradient times
// the activation derivative evaluated at this layer's stored output value.
void ConvLayer::calculateOutputsGradient(const Tensor<double, 3>& nextLayerOutputsGradient) {
    for (int ch = 0; ch < outputChannels; ++ch) {
        for (int row = 0; row < outputHeight; ++row) {
            for (int col = 0; col < outputWidth; ++col) {
                const double upstream = nextLayerOutputsGradient(col, row, ch);
                outputsGradient(col, row, ch) = upstream * activation->derivative(outputFeatureMap(col, row, ch));
            }
        }
    }
}
void ConvLayer::calculateWeightsGradient() {
    for (int i = 0; i < numFilters; ++i) {
        for (int c = 0; c < inputChannels; ++c) {
            for (int h = 0; h < filterHeight; ++h) {
                for (int w = 0; w < filterWidth; ++w) {
                    weightsGradient(w, h, c + i * inputChannels) = 0.0;
                    for (int oh = 0; oh < outputHeight; ++oh) {
                        for (int ow = 0; ow < outputWidth; ++ow) {
                            weightsGradient(w, h, c + i * inputChannels) += inputFeatureMap(ow * widthStride + w, oh * heightStride + h, c) * outputsGradient(ow, oh, i);
                        }
                    }
                }
            }
        }
    }
}
// dL/db for each filter is the sum of the gradients over its output channel.
void ConvLayer::calculateBiasesGradient() {
    for (int f = 0; f < numFilters; ++f) {
        double sum = 0.0;
        for (int row = 0; row < outputHeight; ++row) {
            for (int col = 0; col < outputWidth; ++col) {
                sum += outputsGradient(col, row, f);
            }
        }
        biasesGradient(f) = sum;
    }
}
// Momentum update for the input deltas:
//   delta = learningRate * gradient + momentum * previousDelta,
// remembering the new delta for the next batch.
void ConvLayer::calculateInputsDelta(double learningRate, double momentum) {
    for (int ch = 0; ch < inputChannels; ++ch) {
        for (int row = 0; row < inputHeight; ++row) {
            for (int col = 0; col < inputWidth; ++col) {
                const double delta = learningRate * inputsGradient(col, row, ch)
                                   + momentum * prevBatchInputsDelta(col, row, ch);
                inputsDelta(col, row, ch) = delta;
                prevBatchInputsDelta(col, row, ch) = delta;
            }
        }
    }
}
// Momentum update for the output deltas:
//   delta = learningRate * gradient + momentum * previousDelta,
// remembering the new delta for the next batch.
void ConvLayer::calculateOutputsDelta(double learningRate, double momentum) {
    for (int ch = 0; ch < outputChannels; ++ch) {
        for (int row = 0; row < outputHeight; ++row) {
            for (int col = 0; col < outputWidth; ++col) {
                const double delta = learningRate * outputsGradient(col, row, ch)
                                   + momentum * prevBatchOutputsDelta(col, row, ch);
                outputsDelta(col, row, ch) = delta;
                prevBatchOutputsDelta(col, row, ch) = delta;
            }
        }
    }
}
// Momentum update over the packed weight-delta tensor, whose channel axis is
// laid out as c + filterIndex * inputChannels.
void ConvLayer::calculateWeightsDelta(double learningRate, double momentum) {
    for (int f = 0; f < numFilters; ++f) {
        for (int c = 0; c < inputChannels; ++c) {
            const int packed = c + f * inputChannels;  // packed channel index
            for (int h = 0; h < filterHeight; ++h) {
                for (int w = 0; w < filterWidth; ++w) {
                    const double delta = learningRate * weightsGradient(w, h, packed)
                                       + momentum * prevBatchWeightsDelta(w, h, packed);
                    weightsDelta(w, h, packed) = delta;
                    prevBatchWeightsDelta(w, h, packed) = delta;
                }
            }
        }
    }
}
// One momentum-smoothed delta per filter bias, carried across batches.
void ConvLayer::calculateBiasesDelta(double learningRate, double momentum) {
    for (int f = 0; f < numFilters; ++f) {
        const double delta = learningRate * biasesGradient(f) + momentum * prevBatchBiasesDelta(f);
        biasesDelta(f) = delta;
        prevBatchBiasesDelta(f) = delta;
    }
}
// Forward pass: cache the incoming feature map, then compute the activated
// output feature map (and its pooling/FC views) from it.
void ConvLayer::forward(const Tensor<double, 3>& inputFeatureMap) {
    setInputFeatureMap(inputFeatureMap);
    calculateOutputFeatureMap();
}
// Backward pass: given the gradient flowing in from the next layer, compute
// this layer's local output gradient, the weight/bias gradients used by the
// parameter update, and (when an upstream layer exists) the input gradient
// to propagate further back.
void ConvLayer::backward(const Tensor<double, 3>& nextLayerOutputsGradient) {
    // Sanity-check the wiring when a typed previous-layer handle is cached.
    if (prevConvLayer != nullptr && prevConvLayer->getLayerIndex() != layerIndex - 1) {
        throw runtime_error("Previous convolution layer index is not correct.");
    }
    if (prevPoolingLayer != nullptr && prevPoolingLayer->getLayerIndex() != layerIndex - 1) {
        throw runtime_error("Previous pooling layer index is not correct.");
    }
    // BUGFIX: the original returned immediately when prevLayer == nullptr, so
    // the FIRST layer in a network never computed its weight/bias gradients
    // and its parameters could never be updated. Local gradients are always
    // needed; only the propagation to the input is optional.
    calculateOutputsGradient(nextLayerOutputsGradient);
    calculateWeightsGradient();
    calculateBiasesGradient();
    if (prevLayer != nullptr) {
        calculateInputsGradient();
    }
}
// Apply the momentum-smoothed weight deltas to every filter.
void ConvLayer::updateWeights(double learningRate, double momentum) {
    calculateWeightsDelta(learningRate, momentum);
    for (int i = 0; i < numFilters; ++i) {
        // PERF BUGFIX: the original called getWeights()/setWeights() inside
        // the innermost loop, copying the whole weight tensor in and out once
        // per weight element. Fetch once, update in place, store once.
        Tensor<double, 3> weights = filters[i]->getWeights();
        for (int c = 0; c < inputChannels; ++c) {
            for (int h = 0; h < filterHeight; ++h) {
                for (int w = 0; w < filterWidth; ++w) {
                    weights(w, h, c) += weightsDelta(w, h, c + i * inputChannels);
                }
            }
        }
        filters[i]->setWeights(weights);
    }
}
// Apply the momentum-smoothed bias deltas to every filter.
void ConvLayer::updateBiases(double learningRate, double momentum) {
    calculateBiasesDelta(learningRate, momentum);
    for (int i = 0; i < numFilters; ++i) {
        Tensor<double, 1> biases = filters[i]->getBiases();
        // NOTE(review): this updates element `i` of filter i's OWN bias
        // tensor. If each filter stores fewer than numFilters bias entries,
        // this indexes out of range for i > 0 — verify against Filter's bias
        // layout (conv2d indexes the same tensor by output channel instead).
        biases(i) += biasesDelta(i);
        filters[i]->setBiases(biases);
    }
}
void ConvLayer::print() const {
    string indent = "   ";
    cout << "Layer: " << endl;
    cout << "InputWidth: " << inputWidth << endl;
    cout << "InputHeight: " << inputHeight << endl;
    cout << "InputChannels: " << inputChannels << endl;
    cout << "Inputs: " << endl;
    for (int c = 0; c < inputChannels; ++c) {
        for (int h = 0; h < inputHeight; ++h) {
            for (int w = 0; w < inputWidth; ++w) {
                cout << inputFeatureMap(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "NumFilters: " << numFilters << endl;
    cout << "FilterWidth: " << filterWidth << endl;
    cout << "FilterHeight: " << filterHeight << endl;
    cout << "Filters: " << endl;
    for (int i = 0; i < numFilters; ++i) {
        cout << "   Filter " << i << ": " << endl;
        filters[i]->print();
    }
    cout << "Strides: " << endl;
    cout << "   WidthStride: " << widthStride << endl;
    cout << "   HeightStride: " << heightStride << endl;
    cout << "Padding: " << padding << endl;
    cout << "OutputWidth: " << outputWidth << endl;
    cout << "OutputHeight: " << outputHeight << endl;
    cout << "OutputChannels: " << outputChannels << endl;
    cout << "Outputs: " << endl;
    for (int c = 0; c < outputChannels; ++c) {
        for (int h = 0; h < outputHeight; ++h) {
            for (int w = 0; w < outputWidth; ++w) {
                cout << indent << outputFeatureMap(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Output Gradients: " << endl;
    for (int c = 0; c < outputChannels; ++c) {
        for (int h = 0; h < outputHeight; ++h) {
            for (int w = 0; w < outputWidth; ++w) {
                cout << indent << outputsGradient(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Input Gradients: " << endl;
    for (int c = 0; c < inputChannels; ++c) {
        for (int h = 0; h < inputHeight; ++h) {
            for (int w = 0; w < inputWidth; ++w) {
                cout << indent << inputsGradient(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Weight Gradients: " << endl;
    for (int i = 0; i < numFilters; ++i) {
        for (int c = 0; c < inputChannels; ++c) {
            for (int h = 0; h < filterHeight; ++h) {
                for (int w = 0; w < filterWidth; ++w) {
                    cout << indent << weightsGradient(w, h, c + i * inputChannels) << " ";
                }
                cout << endl;
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Bias Gradients: " << endl;
    for (int i = 0; i < numFilters; ++i) {
        cout << indent << biasesGradient(i) << " ";
    }
    cout << endl;
    cout << "Outputs Delta: " << endl;
    for (int c = 0; c < outputChannels; ++c) {
        for (int h = 0; h < outputHeight; ++h) {
            for (int w = 0; w < outputWidth; ++w) {
                cout << indent << outputsDelta(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Inputs Delta: " << endl;
    for (int c = 0; c < inputChannels; ++c) {
        for (int h = 0; h < inputHeight; ++h) {
            for (int w = 0; w < inputWidth; ++w) {
                cout << indent << inputsDelta(w, h, c) << " ";
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Weights Delta: " << endl;
    for (int i = 0; i < numFilters; ++i) {
        for (int c = 0; c < inputChannels; ++c) {
            for (int h = 0; h < filterHeight; ++h) {
                for (int w = 0; w < filterWidth; ++w) {
                    cout << indent << weightsDelta(w, h, c + i * inputChannels) << " ";
                }
                cout << endl;
            }
            cout << endl;
        }
        cout << endl;
    }
    cout << "Biases Delta: " << endl;
    for (int i = 0; i < numFilters; ++i) {
        cout << indent << biasesDelta(i) << " ";
    }
    cout << endl;
    cout << "Activation: " << ActivationFactory<double, 3>::getName(activation.get()) << endl;
    cout << "Loss: " << LossFactory::getName(loss.get()) << endl;
}