#include <algorithm>
#include <cmath>
#include <stdexcept>
#include <string>
#include <vector>

namespace qing {
    // Abstract interface for a training loss: pairs a scalar loss value with
    // its element-wise gradient w.r.t. the predictions. Concrete losses
    // (e.g. MSELoss, BCELoss) implement all three pure-virtual methods.
    class LossFunction {
    public:
        // Virtual destructor: required so deleting through a LossFunction*
        // destroys the derived object correctly.
        virtual ~LossFunction() = default;
        
        // Compute the scalar loss value.
        // predictions/targets must have equal length (implementations throw
        // std::invalid_argument otherwise).
        virtual double compute_loss(const std::vector<double>& predictions, 
                                const std::vector<double>& targets) = 0;
        
        // Compute the gradient of the loss w.r.t. each prediction.
        // Returns a vector the same length as `predictions`.
        virtual std::vector<double> compute_gradient(const std::vector<double>& predictions,
                                                    const std::vector<double>& targets) = 0;
        
        // Human-readable name of the loss function.
        virtual std::string get_loss_name() const = 0;
    };

    // 均方误差损失
    // Mean Squared Error loss: L = (1/n) * sum_i (p_i - t_i)^2.
    class MSELoss : public LossFunction {
    public:
        // Compute the mean squared error over all elements.
        // @param predictions model outputs
        // @param targets     ground-truth values, same length as predictions
        // @return mean of squared differences; 0.0 for empty input
        // @throws std::invalid_argument on size mismatch
        double compute_loss(const std::vector<double>& predictions, 
                        const std::vector<double>& targets) override {
            if (predictions.size() != targets.size()) {
                throw std::invalid_argument("Predictions and targets size mismatch");
            }
            // Guard: empty input would otherwise yield 0.0/0 == NaN below.
            if (predictions.empty()) {
                return 0.0;
            }
            
            double loss = 0.0;
            for (size_t i = 0; i < predictions.size(); ++i) {
                double diff = predictions[i] - targets[i];
                loss += diff * diff;
            }
            return loss / predictions.size();
        }
        
        // Gradient of MSE w.r.t. each prediction: dL/dp_i = 2*(p_i - t_i)/n.
        // @return gradient vector, same length as predictions (empty for empty input)
        // @throws std::invalid_argument on size mismatch
        std::vector<double> compute_gradient(const std::vector<double>& predictions,
                                            const std::vector<double>& targets) override {
            if (predictions.size() != targets.size()) {
                throw std::invalid_argument("Predictions and targets size mismatch");
            }
            
            std::vector<double> gradient(predictions.size());
            // Note: loop body never runs for empty input, so no division by
            // zero can occur here; the returned gradient is simply empty.
            for (size_t i = 0; i < predictions.size(); ++i) {
                gradient[i] = 2.0 * (predictions[i] - targets[i]) / predictions.size();
            }
            return gradient;
        }
        
        std::string get_loss_name() const override {
            return "MSE Loss";
        }
    };

    // 二元交叉熵损失（用于二分类）
    // Binary Cross-Entropy loss for binary classification:
    // L = -(1/n) * sum_i [t_i*log(p_i) + (1-t_i)*log(1-p_i)],
    // with predictions clamped to [1e-15, 1-1e-15] for numerical stability.
    class BCELoss : public LossFunction {
    public:
        // Compute the mean binary cross-entropy.
        // @param predictions probabilities in [0,1] (clamped internally)
        // @param targets     labels, typically 0.0 or 1.0
        // @return mean BCE; 0.0 for empty input
        // @throws std::invalid_argument on size mismatch
        double compute_loss(const std::vector<double>& predictions, 
                        const std::vector<double>& targets) override {
            if (predictions.size() != targets.size()) {
                throw std::invalid_argument("Predictions and targets size mismatch");
            }
            // Guard: empty input would otherwise yield -0.0/0 == NaN below.
            if (predictions.empty()) {
                return 0.0;
            }
            
            double loss = 0.0;
            for (size_t i = 0; i < predictions.size(); ++i) {
                // Clamp p away from 0 and 1 so log(p) / log(1-p) stay finite.
                double p = std::max(std::min(predictions[i], 1.0 - 1e-15), 1e-15);
                loss += targets[i] * std::log(p) + (1 - targets[i]) * std::log(1 - p);
            }
            return -loss / predictions.size();
        }
        
        // Gradient of BCE w.r.t. each prediction:
        // dL/dp_i = (p_i - t_i) / (p_i * (1 - p_i)) / n, using the clamped p.
        // @return gradient vector, same length as predictions (empty for empty input)
        // @throws std::invalid_argument on size mismatch
        std::vector<double> compute_gradient(const std::vector<double>& predictions,
                                            const std::vector<double>& targets) override {
            if (predictions.size() != targets.size()) {
                throw std::invalid_argument("Predictions and targets size mismatch");
            }
            
            std::vector<double> gradient(predictions.size());
            // Loop body never runs for empty input, so no division by zero here.
            for (size_t i = 0; i < predictions.size(); ++i) {
                // Same clamping as compute_loss keeps the denominator nonzero.
                double p = std::max(std::min(predictions[i], 1.0 - 1e-15), 1e-15);
                gradient[i] = (p - targets[i]) / (p * (1 - p)) / predictions.size();
            }
            return gradient;
        }
        
        std::string get_loss_name() const override {
            return "Binary Cross Entropy Loss";
        }
    };
}