#include <cmath>
#include <cstdint>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <vector>

#include "nets.hpp"

// Trains a physics-informed neural network to solve the harmonic-oscillator
// ODE u'' + u = 0 on t in [0, 2*pi] with initial conditions u(0) = 0 and
// u'(0) = 0.1, writing the predicted solution to a file after every epoch.
int main(int argc, char* argv[])
{
    std::cout << std::scientific << std::setprecision(7);

    // When true, every "real" time sample is immediately followed by a
    // duplicate of the initial-condition sample (t = 0, idx = 0), so the
    // initial condition is represented as often as the interior collocation
    // points in every random batch.
    const bool useInitConditionsMoreTimes = true;

    // Build the time inputs: numTimePoints samples uniformly spanning [0, 2*pi].
    const int numTimePoints = 100;
    std::vector<float> t;
    // torch::kLong is int64_t; use int64_t explicitly (not `long`, which is
    // 32-bit on LLP64 platforms such as Windows) so from_blob below reads the
    // buffer with the correct element width.
    std::vector<int64_t> idx;
    for (int i = 0; i < numTimePoints; ++i)
    {
        t.push_back(2.0 * M_PI / double(numTimePoints - 1) * i);
        idx.push_back(i);
        if (useInitConditionsMoreTimes)
        { // Interleave an extra initial-condition sample after each point.
            t.push_back(0.0f);
            idx.push_back(0);
        }
    }

    // Wrap the host vectors without copying; `t` and `idx` outlive the
    // tensors, so the aliased storage stays valid for the whole run.
    torch::Tensor tT =
        torch::from_blob(t.data(), {int(t.size()), 1}, torch::kFloat).requires_grad_(true);
    torch::Tensor idxT = torch::from_blob(idx.data(), {int(idx.size())}, torch::kLong);

    auto dataSet = CustomDtaset(tT, idxT).map(torch::data::transforms::Stack<>());

    auto dataLoader = torch::data::make_data_loader<torch::data::samplers::RandomSampler>(
        std::move(dataSet), 32);

    // Network (1 input -> 1 output) and optimizer.
    std::shared_ptr<Net> net = std::make_shared<Net>(1, 1);

    auto optimizer = std::make_shared<torch::optim::AdamW>(net->parameters(), 1.e-2);

    // Train until the loss summed over one whole epoch drops below tolerance.
    double lossVal = 1.0;
    int epochIdx   = 0;
    while (lossVal > 1.e-6)
    {
        lossVal = 0.0;
        net->train();
        for (auto& batch : *dataLoader)
        {
            torch::Tensor inp = batch.data;
            torch::Tensor tag = batch.target;
            const int64_t theSize = tag.size(0); // batch rows (int64_t: no narrowing)

            // Forward pass.
            torch::Tensor out = net->forward(inp);

            // First and second time derivatives via autograd.
            // retain_graph=true keeps the graph alive for the second grad call
            // and the backward pass; create_graph=true makes the derivatives
            // themselves differentiable so they can enter the loss.
            const auto ones = torch::ones_like(out);
            torch::Tensor ddt =
                torch::autograd::grad({out}, {inp}, {ones}, true, true, false)[0];
            torch::Tensor d2dt2 =
                torch::autograd::grad({ddt}, {inp}, {ones}, true, true, false)[0];

            // Assemble targets. For non-initial rows the target is the
            // detached prediction itself, so those rows contribute zero loss
            // and zero gradient — effectively masking them out of the two
            // data-fitting terms while keeping a fixed tensor shape.
            torch::Tensor outTag = torch::zeros_like(out);
            torch::Tensor ddtTag = torch::zeros_like(out);
            torch::Tensor odeVal = torch::zeros_like(out);
            for (int64_t i = 0; i < theSize; ++i)
            {
                if (tag[i].item<int64_t>() == 0)
                { // Index 0 is the initial time: enforce u(0)=0 and u'(0)=0.1.
                    outTag[i][0] = 0.0;
                    ddtTag[i][0] = 0.1;
                }
                else
                { // Other times carry no reference value.
                    outTag[i][0] = out[i][0].item<float>();
                    ddtTag[i][0] = ddt[i][0].item<float>();
                }
                // Residual of the governing equation u'' + u = 0.
                odeVal[i][0] = d2dt2[i][0] + out[i][0];
            }

            // Individual loss terms.
            torch::Tensor outLoss = torch::mse_loss(out, outTag);
            torch::Tensor ddtLoss = torch::mse_loss(ddt, ddtTag);
            torch::Tensor odeLoss = torch::mse_loss(odeVal, torch::zeros_like(odeVal));

            // Total loss.
            auto totalLoss = outLoss + ddtLoss + odeLoss;

            // Backward pass and parameter update.
            optimizer->zero_grad();
            totalLoss.backward();
            optimizer->step();

            // Accumulate the epoch loss (stop criterion) and log this batch.
            // Print the batch total, not the running epoch sum, so the first
            // column is consistent with the per-component columns.
            const float batchLoss = totalLoss.item<float>();
            lossVal += batchLoss;
            std::cout << "TOTAL LOSS: " << batchLoss
                      << ", OUT LOSS: " << outLoss.item<float>()
                      << ", DDT LOSS1: " << ddtLoss.item<float>()
                      << ", D2DT2 LOSS2: " << odeLoss.item<float>() << std::endl;
        }

        epochIdx += 1;

        // Dump the current solution on the full grid once per epoch.
        net->eval();
        torch::Tensor out;
        {
            // No gradients are needed for this evaluation-only forward pass.
            torch::NoGradGuard noGrad;
            out = net->forward(tT);
        }
        std::ofstream os("solution-" + // std::string(4 - std::to_string(epochIdx).length(), '0') +
                         std::to_string(epochIdx) + ".txt");
        for (int i = 0; i < int(t.size()); ++i)
        {
            // Skip the interleaved duplicate initial-condition rows.
            if (i % 2 != 0 && useInitConditionsMoreTimes) continue;
            os << t[i] << " " << out[i][0].item<float>() << '\n';
        }
        // ofstream closes itself via RAII at end of scope.
    }

    return 0;
}
