import itertools
import pathlib

import torch

import models
import shared.checkpoint


def _loss(model: torch.nn.Module, point_count: int = 1000):
    # 方程
    def l1():
        t = torch.rand(point_count)
        t = t.requires_grad_()
        x = torch.rand(point_count) * 2 - 1
        x = x.requires_grad_()

        f = model(torch.stack([t, x], dim=-1))
        f = f[:, 0]

        df_dt, df_dx = torch.autograd.grad(f, [t, x], torch.ones_like(f), create_graph=True)
        df_dx_dx, = torch.autograd.grad(df_dx, x, torch.ones_like(df_dx), create_graph=True)

        loss = df_dt - 0.0001 * df_dx_dx + 5 * f ** 3 - 5 * f
        return torch.mean(loss ** 2)

    # 初始
    def l2():
        t = torch.rand(point_count) * 0
        x = torch.rand(point_count) * 2 - 1

        f = model(torch.stack([t, x], dim=-1))[:, 0]

        loss = f - x ** 2 * torch.cos(torch.pi * x)
        return torch.mean(loss ** 2)

    # 边界1
    def l3():
        t = torch.rand(point_count)
        x = torch.rand(point_count) * 0 - 1

        f1 = model(torch.stack([t, x], dim=-1))
        f2 = model(torch.stack([t, x + 2], dim=-1))

        loss = f1 - f2
        return torch.mean(loss ** 2)

    # 边界2
    def l4():
        t = torch.rand(point_count)
        x = torch.rand(point_count) * 0 - 1
        x = x.requires_grad_()

        f1 = model(torch.stack([t, x], dim=-1))
        df1_dx, = torch.autograd.grad(f1, x, torch.ones_like(f1), create_graph=True)

        x = torch.rand(point_count) * 0 + 1
        x = x.requires_grad_()

        f2 = model(torch.stack([t, x], dim=-1))
        df2_dx, = torch.autograd.grad(f2, x, torch.ones_like(f2), create_graph=True)

        loss = df1_dx - df2_dx
        return torch.mean(loss ** 2)

    return 10 * l1() + 300 * l2() + 10 * l3() + 10 * l4()


def main():
    """Train the PINN indefinitely, checkpointing every 500 epochs.

    Resumes model/optimizer/RNG state from the newest checkpoint in
    ./checkpoints when one exists; otherwise starts from epoch 1.
    """
    torch.random.manual_seed(1234)
    torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

    model = models.get_model()
    optimizer = torch.optim.Adam(model.parameters())

    checkpoint_manager = shared.checkpoint.CheckpointManager(pathlib.Path("./checkpoints"))
    previous_checkpoint = checkpoint_manager.last_existed()
    if previous_checkpoint:
        # map_location keeps a CUDA-saved checkpoint loadable on a
        # CPU-only host (and vice versa).
        checkpoint = torch.load(
            previous_checkpoint[1],
            map_location="cuda" if torch.cuda.is_available() else "cpu",
        )
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        # NOTE(review): this restores only the CPU generator; with a CUDA
        # default device the sampling in _loss draws from the CUDA
        # generator, whose state is not checkpointed — confirm whether
        # bit-exact resume matters here.
        torch.random.set_rng_state(checkpoint["random"])
    else:
        # (last_epoch, path) sentinel so training starts at epoch 1.
        previous_checkpoint = (0, None)

    model.train()
    for epoch in itertools.count(previous_checkpoint[0] + 1):
        optimizer.zero_grad()
        # Switch to 10x more collocation points late in training for a
        # lower-variance loss estimate.
        point_count = 1000 if epoch <= 100000 else 10000
        c = _loss(model, point_count)
        c.backward()
        optimizer.step()

        print(f"Epoch {epoch}: {c}")
        if epoch % 500 == 0:
            torch.save({
                "model": model.state_dict(),
                "random": torch.random.get_rng_state(),
                "optimizer": optimizer.state_dict()
            }, checkpoint_manager.new(epoch))

# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()
