#ifndef LOSSFUNCTION_H
#define LOSSFUNCTION_H

#include <algorithm>
#include <cmath>
#include <vector>

// Abstract interface for a loss function: a scalar batch loss plus its
// gradient with respect to the network outputs.
class ILossFunction {
public:
    virtual ~ILossFunction() = default;
    // Scalar loss for a batch.
    // outputs:         network predictions, shape [num_samples][num_outputs]
    // expectedOutputs: ground-truth targets, same shape as outputs
    virtual float loss(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const = 0;
    // Element-wise gradient of the loss w.r.t. outputs; same shape as outputs.
    virtual std::vector<std::vector<float>> gradient(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const = 0;
};

// Binary cross-entropy loss
class BinaryCrossEntropyLoss : public ILossFunction {
public:
    // Mean binary cross-entropy over every element of the batch.
    // outputs:         predicted probabilities, shape [num_samples][num_outputs]
    // expectedOutputs: target labels (0 or 1), same shape as outputs
    // Returns 0 for an empty batch (avoids the original division by zero).
    float loss(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const override {
        constexpr float epsilon = 1e-9f;
        float totalLoss = 0.0f;
        size_t count = 0;

        for (size_t i = 0; i < outputs.size(); ++i) {
            for (size_t j = 0; j < outputs[i].size(); ++j) {
                float y = expectedOutputs[i][j];
                // Clamp yHat into [epsilon, 1 - epsilon] so log() never sees 0 or 1.
                float yHat = std::min(std::max(outputs[i][j], epsilon), 1.0f - epsilon);
                totalLoss += -y * std::log(yHat) - (1.0f - y) * std::log(1.0f - yHat);
                ++count;
            }
        }

        return count == 0 ? 0.0f : totalLoss / count;
    }

    // Gradient of the binary cross-entropy loss w.r.t. each output:
    //   d/dyHat [-y*log(yHat) - (1-y)*log(1-yHat)] = -y/yHat + (1-y)/(1-yHat)
    // outputs:         predicted probabilities, shape [num_samples][num_outputs]
    // expectedOutputs: target labels (0 or 1), same shape as outputs
    // NOTE(review): the gradient is per-element and not divided by the element
    // count, while loss() returns a mean — confirm callers expect this scale.
    std::vector<std::vector<float>> gradient(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const override {
        constexpr float epsilon = 1e-9f;
        // Size each row from its own input row (the original sized every row
        // from outputs[0], which is UB on an empty batch and wrong for ragged rows).
        std::vector<std::vector<float>> gradients(outputs.size());

        for (size_t i = 0; i < outputs.size(); ++i) {
            gradients[i].resize(outputs[i].size());
            for (size_t j = 0; j < outputs[i].size(); ++j) {
                float y = expectedOutputs[i][j];
                // Same clamp as loss() so the denominators never hit 0.
                float yHat = std::min(std::max(outputs[i][j], epsilon), 1.0f - epsilon);
                gradients[i][j] = -y / yHat + (1.0f - y) / (1.0f - yHat);
            }
        }

        return gradients;
    }
};


// Mean squared error loss
class MeanSquaredErrorLoss : public ILossFunction {
public:
    // Mean squared error averaged over every element of the batch.
    // outputs:         network predictions, shape [num_samples][num_outputs]
    // expectedOutputs: target values, same shape as outputs
    // Returns 0 for an empty batch (avoids the original division by zero).
    float loss(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const override {
        float totalLoss = 0.0f;
        size_t count = 0;

        for (size_t i = 0; i < outputs.size(); ++i) {
            for (size_t j = 0; j < outputs[i].size(); ++j) {
                float diff = expectedOutputs[i][j] - outputs[i][j];
                totalLoss += diff * diff;
                ++count;
            }
        }

        return count == 0 ? 0.0f : totalLoss / count;
    }

    // Gradient of the squared error w.r.t. each output:
    //   d/dyHat (y - yHat)^2 = 2 * (yHat - y)
    // divided by the per-sample output count.
    // NOTE(review): this divides by outputs[i].size() only, not by the number
    // of samples, so its scale differs from loss() (which averages over all
    // elements). Preserved as-is for caller compatibility — confirm intended.
    std::vector<std::vector<float>> gradient(const std::vector<std::vector<float>>& outputs, const std::vector<std::vector<float>>& expectedOutputs) const override {
        // Size each row from its own input row (the original sized every row
        // from outputs[0], which is UB on an empty batch and wrong for ragged rows).
        std::vector<std::vector<float>> gradients(outputs.size());

        for (size_t i = 0; i < outputs.size(); ++i) {
            gradients[i].resize(outputs[i].size());
            for (size_t j = 0; j < outputs[i].size(); ++j) {
                gradients[i][j] = 2.0f * (outputs[i][j] - expectedOutputs[i][j]) / outputs[i].size();
            }
        }

        return gradients;
    }
};

#endif // LOSSFUNCTION_H
