/*
 * Neuron layer implementation.
 * Manages the collection of neurons that make up one layer of a neural
 * network; supports inter-layer connections and batch processing.
 */
#include "NeuronLayer.h"

#include <cmath>
#include <iostream>
#include <limits>     // numeric_limits (constructor ID-range check)
#include <memory>
#include <random>
#include <stdexcept>  // invalid_argument / logic_error
#include <string>
#include <vector>

#include "Connection.h"
#include "Neuron.h"
#include "NeuronManager.h"

/// Builds a layer of `numNeurons` neurons with consecutive global IDs
/// starting at `startNeuronId`, registering each with the NeuronManager.
///
/// @param layerIndex      zero-based index of this layer in the network
/// @param layerType       INPUT, HIDDEN, or OUTPUT
/// @param numNeurons      number of neurons to create (must be > 0)
/// @param startNeuronId   global ID of the first neuron (must be >= 0)
/// @param activationName  activation function name, forwarded to Layer
/// @param lossName        loss function name, forwarded to Layer
/// @throws invalid_argument on any out-of-range argument
NeuronLayer::NeuronLayer(int layerIndex, const LayerType& layerType, int numNeurons, int startNeuronId, const string& activationName, const string& lossName)
    : Layer(layerIndex, layerType, activationName, lossName), numNeurons(numNeurons), startNeuronId(startNeuronId) {
    // NOTE: `neurons` is intentionally NOT sized in the init list. Doing so
    // would run before the validation below, and a negative count converted
    // to size_t would make vector throw length_error/bad_alloc instead of
    // the invalid_argument this class promises.
    if (numNeurons <= 0) {
        throw invalid_argument("Number of neurons must be positive");
    }
    if (layerIndex < 0) {
        throw invalid_argument("Layer index must be non-negative");
    }
    if (startNeuronId < 0) {
        throw invalid_argument("Start neuron ID must be non-negative");
    }
    // Overflow-safe form of: startNeuronId + numNeurons - 1 >= INT_MAX.
    // The original expression could overflow signed int (UB) before the
    // comparison ever happened; this rearrangement cannot (numNeurons >= 1).
    if (startNeuronId >= numeric_limits<int>::max() - numNeurons + 1) {
        throw invalid_argument("Start neuron ID + number of neurons must be less than max int");
    }
    if (layerType != LayerType::INPUT && layerType != LayerType::HIDDEN && layerType != LayerType::OUTPUT) {
        throw invalid_argument("Invalid layer type");
    }
    // Create neuron instances and register them with the global manager.
    neurons.reserve(numNeurons);
    for (int i = 0; i < numNeurons; ++i) {
        neurons.push_back(make_shared<Neuron>(startNeuronId + i, layerIndex, i));
        NeuronManager::getInstance().addNeuron(neurons.back());
    }
}
NeuronLayer::~NeuronLayer() {}
int NeuronLayer::getNumNeurons() const {
    return numNeurons;
}
int NeuronLayer::getStartNeuronId() const {
    return startNeuronId;
}
/// @return read-only view of this layer's neurons, in layer order.
const vector<shared_ptr<Neuron> >& NeuronLayer::getNeurons() const {
    return this->neurons;
}
/// Collects the current activation of every neuron in this layer.
/// @return one output value per neuron, in layer order.
const vector<double> NeuronLayer::getOutputs() const {
    vector<double> result(numNeurons);
    for (int idx = 0; idx < numNeurons; ++idx) {
        result[idx] = neurons[idx]->getOutput();
    }
    return result;
}
/// Collects the current error term (delta) of every neuron in this layer.
/// @return one delta value per neuron, in layer order.
const vector<double> NeuronLayer::getErrors() const {
    vector<double> deltas;
    deltas.reserve(neurons.size());
    for (const auto& neuron : neurons) {
        deltas.push_back(neuron->getDelta());
    }
    return deltas;
}
/// Runs the forward pass for this layer.
///
/// For an INPUT layer, `inputs` is the raw sample (one value per neuron)
/// and is passed straight through. For HIDDEN/OUTPUT layers, `inputs` is
/// the upstream layer's output vector, paired with each neuron's incoming
/// connection weights.
///
/// @param inputs  sample values (INPUT) or upstream outputs (HIDDEN/OUTPUT)
/// @throws invalid_argument if the input size does not match expectations
void NeuronLayer::forward(const vector<double>& inputs) {
    if (layerType == LayerType::INPUT) {
        if (inputs.size() != neurons.size()) {
            throw invalid_argument("Input size " + to_string(inputs.size()) + " does not match number of neurons in input layer: " + to_string(neurons.size()));
        }
        for (size_t i = 0; i < neurons.size(); i++) {
            neurons[i]->calculateInputLayerOutput(inputs[i]);
        }
        return;
    }
    for (size_t i = 0; i < neurons.size(); i++) {
        vector<shared_ptr<Connection> > upstreamConnections = neurons[i]->getUpstreamConnections();
        size_t numInputs = upstreamConnections.size();
        if (numInputs != inputs.size()) {
            string layerTypeStr = layerType == LayerType::INPUT ? "Input" : (layerType == LayerType::HIDDEN ? "Hidden" : "Output");
            throw invalid_argument("Number of inputs of layer " + to_string(layerIndex) + "(" + layerTypeStr + " Layer)" + " does not match number of neurons of upstream layer "
                + to_string(layerIndex-1) + ": "
                + to_string(inputs.size())  + " vs " + to_string(numInputs));
        }
        // Pair each upstream output with the weight of its connection.
        // BUG FIX: the original constructed this vector with numInputs
        // default (0,0) pairs and then push_back'ed numInputs more, so each
        // neuron received 2*numInputs entries — the first half all zeros.
        // Construct empty, reserve, then append exactly numInputs pairs.
        // (Could be hoisted to the Layer level for shared use later.)
        vector<pair<double, double> > inputsWeights;
        inputsWeights.reserve(numInputs);
        for (size_t j = 0; j < numInputs; j++) {
            inputsWeights.emplace_back(inputs[j], upstreamConnections[j]->getWeight());
        }
        if (layerType == LayerType::OUTPUT) {
            // NOTE(review): the second argument indexes the UPSTREAM output
            // vector by this layer's neuron index i — presumably intended as
            // a per-neuron extra input, but it looks suspect when this layer
            // is wider than the upstream one. TODO confirm against Neuron.
            neurons[i]->calculateOutputLayerOutput(inputsWeights, inputs[i]);
        }
        else if (layerType == LayerType::HIDDEN) {
            neurons[i]->calculateHiddenLayerOutput(inputsWeights);
        }
    }
}
/// Runs the backward pass (delta computation) for this layer.
///
/// For an OUTPUT layer, `downstreamDeltas` carries the target values (one
/// per neuron, per the original code's note). For a HIDDEN layer the deltas
/// are pulled from each neuron's downstream connections, so the argument is
/// unused. Unlike forward(), the computation details live in Neuron.
///
/// @param downstreamDeltas  target values for an OUTPUT layer; ignored for HIDDEN
/// @throws logic_error       if called on an INPUT layer
/// @throws invalid_argument  if the delta count mismatches (OUTPUT only)
void NeuronLayer::backward(const vector<double>& downstreamDeltas) {
    if (layerType == LayerType::INPUT) {
        throw logic_error("backward can only be called on hidden and output layers");
    }
    // Loop-invariant validation, hoisted out of the per-neuron loop
    // (the original re-checked it on every iteration).
    if (layerType == LayerType::OUTPUT && downstreamDeltas.size() != neurons.size()) {
        throw invalid_argument("Number of deltas does not match number of neurons in layer "
            + to_string(layerIndex) + ": "
            + to_string(downstreamDeltas.size())  + " vs " + to_string(neurons.size()));
    }
    for (size_t i = 0; i < neurons.size(); i++) {
        if (layerType == LayerType::OUTPUT) {
            neurons[i]->calculateOutputLayerDelta(downstreamDeltas[i]); // Here deltas are actually target values
        }
        else if (layerType == LayerType::HIDDEN) {
            neurons[i]->calculateHiddenLayerDelta();
        }
    }
}
void NeuronLayer::updateWeights(double learningRate) {
    for (int i = 0; i < neurons.size(); i++) {
        neurons[i]->updateInputWeights(learningRate);
    }
}
void NeuronLayer::updateWeights(double learningRate, double momentum) {
    for (int i = 0; i < neurons.size(); i++) {
        neurons[i]->updateInputWeights(learningRate, momentum);
    }
}
void NeuronLayer::updateBias(double learningRate) {       
    for (int i = 0; i < neurons.size(); i++) {
        neurons[i]->updateBias(learningRate);
    }
}
void NeuronLayer::updateBias(double learningRate, double momentum) {       
    for (int i = 0; i < neurons.size(); i++) {
        neurons[i]->updateBias(learningRate, momentum);
    }
}
void NeuronLayer::print() const {
    string layerTypeStr = layerType == LayerType::INPUT ? "Input" : (layerType == LayerType::HIDDEN ? "Hidden" : "Output");
    cout << "Layer " << layerIndex << " (" << layerTypeStr << ")"
    << ", Number of neurons: " << numNeurons << endl;
    for (const auto& neuron : neurons) {
        neuron->print();
    }
}