#include <iostream>
#include <iomanip>

#include "NeuralNetwork.hpp"
#include "DataGenerator.hpp"
#include "LossFunction.hpp"
#include "Trainer.hpp"

using namespace qing;
// Trains a small MLP on synthetic linear-regression data and logs the
// train/validation MSE every 10 epochs, plus the final epoch so the
// end-of-training loss is always visible.
void train_regression_example() {
    std::cout << "=== 回归问题训练示例 ===" << std::endl;

    // Generate synthetic data: 1000 train / 200 validation samples,
    // 3 input features each, noise scale 0.1.
    DataGenerator generator;
    std::vector<std::vector<double>> train_features, train_labels;
    std::vector<std::vector<double>> val_features, val_labels;

    generator.generate_linear_data(1000, 3, train_features, train_labels, 0.1);
    generator.generate_linear_data(200, 3, val_features, val_labels, 0.1);

    // Build the model: a 3 -> 10 -> 5 -> 1 MLP with ReLU activations.
    NeuralNetwork model;
    model.add_layer(std::make_unique<LinearBlock>(3, 10));
    model.add_layer(std::make_unique<ReLU>(10));
    model.add_layer(std::make_unique<LinearBlock>(10, 5));
    model.add_layer(std::make_unique<ReLU>(5));
    model.add_layer(std::make_unique<LinearBlock>(5, 1));

    model.print_architecture();

    // Trainer with MSE loss and learning rate 0.001.
    MSELoss loss_fn;
    Trainer trainer(model, loss_fn, 0.001);

    // Training loop with mini-batch size 32.
    const int epochs = 100;
    for (int epoch = 0; epoch < epochs; ++epoch) {
        double train_loss = trainer.train_epoch(train_features, train_labels, 32);
        double val_loss = trainer.validate(val_features, val_labels);

        // Log every 10 epochs AND on the final epoch; the original
        // `epoch % 10 == 0` alone never printed epoch 99, hiding the
        // final training result.
        if (epoch % 10 == 0 || epoch == epochs - 1) {
            std::cout << "Epoch " << std::setw(3) << epoch 
                      << " | Train Loss: " << std::fixed << std::setprecision(6) << train_loss
                      << " | Val Loss: " << val_loss << std::endl;
        }
    }
}

// Trains a small MLP on the XOR problem (binary classification) and logs
// loss and training accuracy every 20 epochs, plus the final epoch.
void train_classification_example() {
    std::cout << "\n=== 分类问题训练示例 (XOR问题) ===" << std::endl;

    // Generate 100 XOR samples (2 features, 1 binary label each).
    DataGenerator generator;
    std::vector<std::vector<double>> train_features, train_labels;

    generator.generate_xor_data(100, train_features, train_labels);

    // Build the model: 2 -> 4 -> 1. XOR is not linearly separable, so the
    // hidden ReLU layer is required.
    // NOTE(review): the output layer is a raw LinearBlock; BCELoss and the
    // `> 0.5` threshold below assume outputs in [0, 1]. Confirm BCELoss (or
    // the network) applies a sigmoid internally, otherwise a Sigmoid layer
    // is missing here.
    NeuralNetwork model;
    model.add_layer(std::make_unique<LinearBlock>(2, 4));
    model.add_layer(std::make_unique<ReLU>(4));
    model.add_layer(std::make_unique<LinearBlock>(4, 1));

    model.print_architecture();

    // Trainer with binary cross-entropy loss and learning rate 0.01.
    BCELoss loss_fn;
    Trainer trainer(model, loss_fn, 0.01);

    // Training loop with mini-batch size 16.
    const int epochs = 200;
    for (int epoch = 0; epoch < epochs; ++epoch) {
        double train_loss = trainer.train_epoch(train_features, train_labels, 16);

        // Log every 20 epochs AND on the final epoch; the original
        // `epoch % 20 == 0` alone never printed epoch 199.
        if (epoch % 20 == 0 || epoch == epochs - 1) {
            // Compute training-set accuracy in evaluation mode, then
            // restore training mode before the next epoch.
            int correct = 0;
            model.set_training(false);
            for (size_t i = 0; i < train_features.size(); ++i) {
                auto prediction = model.forward(train_features[i]);
                int pred_class = (prediction[0] > 0.5) ? 1 : 0;
                int true_class = static_cast<int>(train_labels[i][0]);
                if (pred_class == true_class) correct++;
            }
            model.set_training(true);

            double accuracy = static_cast<double>(correct) / train_features.size();
            std::cout << "Epoch " << std::setw(3) << epoch 
                      << " | Loss: " << std::fixed << std::setprecision(6) << train_loss
                      << " | Accuracy: " << std::setprecision(4) << accuracy * 100 << "%" << std::endl;
        }
    }
}

// Entry point: run the regression demo first, then the XOR classification demo.
int main() {
    train_regression_example();
    train_classification_example();
    return 0;
}