#include <cmath>    // std::sin (and M_PI where the platform provides it)
#include <cstddef>  // size_t
#include <iostream>
#include <memory>
#include <random>   // std::random_device, std::mt19937, std::uniform_real_distribution
#include <tuple>
#include <vector>

#include "deep_o_net.h"
#include "loss_function.h"
#include "matrix_operations_cpu.h"
#include "relu.h"
// Assumes the relevant network and matrix-operation classes are included above.

// RNG machinery used to randomise the physical parameters of each dataset.
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis_k(0.005, 0.02);  // thermal conductivity range
std::uniform_real_distribution<> dis_amp(0.5, 2.0);   // initial amplitude range

/// Numerically solves the 1D heat equation u_t = k * u_xx with an explicit
/// (FTCS) finite-difference scheme, randomising the conductivity k and the
/// initial sine-wave amplitude for every dataset.  Boundary values are never
/// updated after initialisation, which enforces zero Dirichlet boundaries.
///
/// @param nx            number of spatial grid points
/// @param nt            number of time steps (values < 2 yield no data)
/// @param dx            spatial step size
/// @param dt            time step size
/// @param num_datasets  number of independent random solutions to generate
/// @return one tuple per consecutive time-step pair:
///         (state at step t, state at step t+1, physical time t*dt)
std::vector<std::tuple<std::vector<float>, std::vector<float>, float>> generateHeatEquationData(int nx, int nt, float dx, float dt, int num_datasets) {
    std::vector<std::tuple<std::vector<float>, std::vector<float>, float>> all_data_tuples;
    if (nt < 2 || num_datasets < 1) {
        // nt == 0 would otherwise index u[0] on an empty vector (UB);
        // nt == 1 produced no pairs anyway.
        return all_data_tuples;
    }
    all_data_tuples.reserve(static_cast<size_t>(num_datasets) * (nt - 1));

    // M_PI is not guaranteed by the C++ standard; use an explicit constant.
    constexpr float kPi = 3.14159265358979323846f;

    for (int set = 0; set < num_datasets; ++set) {
        const float k = dis_k(gen);            // random thermal conductivity
        const float amplitude = dis_amp(gen);  // random initial amplitude
        std::vector<std::vector<float>> u(nt, std::vector<float>(nx, 0.0f));

        // Initial condition: a sine half-wave over [0, 1].
        for (int i = 0; i < nx; ++i) {
            u[0][i] = amplitude * std::sin(kPi * i * dx);
        }

        // Stability of the explicit scheme requires alpha <= 0.5.
        const float alpha = k * dt / (dx * dx);

        // Explicit time stepping over the interior points.
        for (int t = 0; t < nt - 1; ++t) {
            for (int i = 1; i < nx - 1; ++i) {
                u[t + 1][i] = u[t][i] + alpha * (u[t][i - 1] - 2 * u[t][i] + u[t][i + 1]);
            }
            // u[t + 1] has just been fully computed, so every iteration yields a
            // valid (input, target) pair.  The previous `t < nt - 2` guard wrongly
            // discarded the final pair even though u[nt - 1] exists.
            all_data_tuples.emplace_back(u[t], u[t + 1], t * dt);
        }
    }
    return all_data_tuples;
}


int main() {
    size_t nx = 10;   // 空间步数
    size_t nt = 100;  // 时间步数
    float dx = 1.0 / (nx - 1);
    float dt = 0.01;
    int num_datasets = 50;  // 生成训练数据集的数量
    int num_test_datasets = 1; // 生成测试数据集的数量
    float lr = 0.02; //学习率

    // 生成训练和测试数据
    auto train_datasets = generateHeatEquationData(nx, nt, dx, dt, num_datasets);
    auto test_datasets = generateHeatEquationData(nx, nt, dx, dt, num_test_datasets);

    // 初始化网络和损失函数
    auto matrixOps = std::make_shared<MatrixOperationsCPU>(); // 需要具体实现
    auto activationFunc = std::make_shared<ReLU>(); // 需要具体实现
    TrunkNetwork trunkNet({1, 100, 10}, lr, matrixOps, activationFunc);
    BranchNetwork branchNet({nx, 100, 10}, lr, matrixOps, activationFunc);
    auto lossFunction = std::make_shared<MeanSquaredErrorLoss>();

    DeepONet deepONet(branchNet, trunkNet, lossFunction);

    // 训练DeepONet
    int epochs = 10;
    for (int epoch = 0; epoch < epochs; ++epoch) {
        for (const auto& data_tuple : train_datasets) {
            std::vector<std::vector<float>> branchInputs = {std::get<0>(data_tuple)};
            std::vector<std::vector<float>> trunkInputs = {{std::get<2>(data_tuple)}};  // 将时间作为 trunk 的输入
            std::vector<std::vector<float>> expectedOutputs = {std::get<1>(data_tuple)};
            deepONet.train(branchInputs, trunkInputs, expectedOutputs);
        }
    }

    // 进行测试并输出结果
    std::cout << "Test Predictions:" << std::endl;
    for (const auto& test_data : test_datasets) {
        std::vector<std::vector<float>> branchInputs = {std::get<0>(test_data)};
        std::vector<std::vector<float>> trunkInputs = {{std::get<2>(test_data)}};
        auto predictions = deepONet.predict(branchInputs, trunkInputs);

        std::cout << "Predicted vs Expected:" << std::endl;
        for (size_t i = 0; i < predictions[0].size(); ++i) {
            std::cout << "Predicted: " << predictions[0][i] << ", Expected: " << std::get<1>(test_data)[i] << std::endl;
        }
        std::cout << "-----" << std::endl;
    }

    return 0;
}
