namespace qing {
    /// Supervised trainer: runs forward/backward passes over sequential
    /// mini-batches and applies parameter updates through the model's API.
    ///
    /// NOTE(review): assumes model.backward() accumulates gradients across a
    /// batch and update_parameters() consumes/resets them — confirm against
    /// NeuralNetwork's contract.
    class Trainer {
    private:
        NeuralNetwork& model;         // model being trained (non-owning; must outlive Trainer)
        LossFunction& loss_function;  // loss used for training and validation (non-owning)
        double learning_rate;         // step size handed to update_parameters()

    public:
        /// @param model   network to train; must outlive this Trainer
        /// @param loss_fn loss function; must outlive this Trainer
        /// @param lr      initial learning rate (default 0.01)
        Trainer(NeuralNetwork& model, LossFunction& loss_fn, double lr = 0.01)
            : model(model), loss_function(loss_fn), learning_rate(lr) {}

        /// Train one epoch over the data in sequential mini-batches.
        /// Data is NOT shuffled here; callers should shuffle beforehand.
        /// @param features   per-sample input vectors
        /// @param labels     per-sample targets; same length as features
        /// @param batch_size samples per parameter update; must be > 0
        /// @return mean of per-batch average losses (0.0 for an empty dataset)
        /// @throws std::invalid_argument on size mismatch or non-positive batch_size
        double train_epoch(const std::vector<std::vector<double>>& features,
                        const std::vector<std::vector<double>>& labels,
                        int batch_size = 32) {
            if (features.size() != labels.size()) {
                throw std::invalid_argument("Features and labels size mismatch");
            }
            // A zero/negative batch size would make `start += step` loop
            // forever (or wrap after unsigned conversion) — reject it.
            if (batch_size <= 0) {
                throw std::invalid_argument("batch_size must be positive");
            }
            // Empty dataset: return early instead of computing 0/0 (NaN) below.
            if (features.empty()) {
                return 0.0;
            }

            // Convert once; avoids repeated signed/unsigned mixing in the loop.
            const size_t step = static_cast<size_t>(batch_size);
            double total_loss = 0.0;
            int num_batches = 0;

            // Simple sequential batching (no shuffling).
            for (size_t start = 0; start < features.size(); start += step) {
                const size_t end = std::min(start + step, features.size());
                double batch_loss = 0.0;

                for (size_t i = start; i < end; ++i) {
                    // Forward pass
                    auto prediction = model.forward(features[i]);

                    // Accumulate scalar loss for reporting
                    batch_loss += loss_function.compute_loss(prediction, labels[i]);

                    // Gradient of the loss w.r.t. the prediction
                    auto grad_output = loss_function.compute_gradient(prediction, labels[i]);

                    // Backward pass
                    model.backward(grad_output);
                }

                // Average loss over this (possibly short, final) batch
                batch_loss /= static_cast<double>(end - start);
                total_loss += batch_loss;
                num_batches++;

                // Apply the accumulated gradients
                model.update_parameters(learning_rate);
            }

            return total_loss / num_batches;
        }

        /// Compute the mean per-sample loss without updating parameters.
        /// Temporarily switches the model to evaluation mode and restores
        /// training mode before returning.
        /// @return mean per-sample loss (0.0 for an empty dataset)
        /// @throws std::invalid_argument on size mismatch
        double validate(const std::vector<std::vector<double>>& features,
                    const std::vector<std::vector<double>>& labels) {
            // Same invariant train_epoch enforces; previously unchecked here,
            // which allowed out-of-bounds access on labels[i].
            if (features.size() != labels.size()) {
                throw std::invalid_argument("Features and labels size mismatch");
            }
            if (features.empty()) {
                return 0.0;  // avoid 0.0 / 0 -> NaN
            }

            model.set_training(false);  // evaluation mode

            double total_loss = 0.0;
            for (size_t i = 0; i < features.size(); ++i) {
                auto prediction = model.forward(features[i]);
                total_loss += loss_function.compute_loss(prediction, labels[i]);
            }

            model.set_training(true);  // restore training mode
            return total_loss / static_cast<double>(features.size());
        }

        /// Set the learning rate used by subsequent train_epoch() calls.
        void set_learning_rate(double lr) {
            learning_rate = lr;
        }
    };
}