import itertools
import pathlib

import torch

import models
import shared.checkpoint


def _loss(model: torch.nn.Module, point_count: int = 1000) -> torch.Tensor:
    """Composite PINN loss for the nonlinear Schrödinger equation.

    The model maps stacked (x, t) points to (u, v), the real and imaginary
    parts of h = u + i*v.  Four Monte-Carlo-sampled terms are combined:

    * l1 — residual of i*h_t + 0.5*h_xx + |h|^2*h = 0 on x in [-5, 5],
      t in [0, pi/2]
    * l2 — initial condition h(x, 0) = 2 / cosh(x)
    * l3 — periodicity of the model values: f(x, t) == f(x + 10, t)
    * l4 — periodicity of the x-derivatives at the edges:
      f_x(-5, t) == f_x(5, t)

    Args:
        model: network taking an (N, 2) tensor of (x, t) points and
            returning an (N, 2) tensor of (u, v) values.
        point_count: number of random collocation points drawn per term.

    Returns:
        Scalar tensor 10 * (l1 + l2 + l3 + l4), differentiable w.r.t.
        the model parameters (fresh random points every call).
    """
    # PDE residual term.
    def l1():
        # Interior collocation points: x ~ U[-5, 5], t ~ U[0, pi/2].
        x = torch.rand(point_count) * 10 - 5
        x = x.requires_grad_()  # leaf w.r.t. which we differentiate
        t = torch.rand(point_count) * (torch.pi / 2)
        t = t.requires_grad_()

        f = model(torch.stack([x, t], dim=-1))
        u = f[:, 0]  # real part of h
        v = f[:, 1]  # imaginary part of h

        # First derivatives; create_graph=True keeps the graph so the
        # second derivative below (and backprop through the loss) work.
        du_dx, du_dt = torch.autograd.grad(u, [x, t], torch.ones_like(u), create_graph=True)
        dv_dx, dv_dt = torch.autograd.grad(v, [x, t], torch.ones_like(v), create_graph=True)

        du_dx_dx, = torch.autograd.grad(du_dx, x, torch.ones_like(du_dx), create_graph=True)
        dv_dx_dx, = torch.autograd.grad(dv_dx, x, torch.ones_like(dv_dx), create_graph=True)

        # Substituting h = u + i*v into i*h_t + 0.5*h_xx + |h|^2*h:
        # real part:      -v_t + 0.5*u_xx + (u^2 + v^2)*u
        # imaginary part:  u_t + 0.5*v_xx + (u^2 + v^2)*v
        loss_real = -dv_dt + 0.5 * du_dx_dx + u * (u ** 2 + v ** 2)
        loss_imaginary = du_dt + 0.5 * dv_dx_dx + v * (u ** 2 + v ** 2)
        return torch.mean(loss_real ** 2 + loss_imaginary ** 2)

    # Initial-condition term.
    def l2():
        x = torch.rand(point_count) * 10 - 5
        t = torch.rand(point_count) * 0  # t fixed at 0 (rand kept so the RNG stream is consumed consistently)

        f = model(torch.stack([x, t], dim=-1))
        u = f[:, 0]
        v = f[:, 1]

        # Target initial state h(x, 0) = 2*sech(x): real part 2/cosh(x), imaginary part 0.
        loss_real = u - 2 / torch.cosh(x)
        loss_imaginary = v
        return torch.mean(loss_real ** 2 + loss_imaginary ** 2)

    # Boundary term 1: periodicity of the values.
    def l3():
        # NOTE(review): unlike l4, which pins x = -5, this samples x over the
        # whole domain and compares against x + 10 (i.e. points in [5, 15],
        # outside the sampled training domain) — confirm this stronger
        # periodicity constraint is intended rather than endpoints only.
        x = torch.rand(point_count) * 10 - 5
        t = torch.rand(point_count) * (torch.pi / 2)

        f1 = model(torch.stack([x, t], dim=-1))
        u1 = f1[:, 0]
        v1 = f1[:, 1]

        f2 = model(torch.stack([x + 10, t], dim=-1))
        u2 = f2[:, 0]
        v2 = f2[:, 1]

        loss_real = u1 - u2
        loss_imaginary = v1 - v2
        return torch.mean(loss_real ** 2 + loss_imaginary ** 2)

    # Boundary term 2: periodicity of the x-derivatives.
    def l4():
        x = torch.rand(point_count) * 0 - 5  # x fixed at the left edge -5
        x = x.requires_grad_()
        t = torch.rand(point_count) * (torch.pi / 2)

        f1 = model(torch.stack([x, t], dim=-1))
        u1 = f1[:, 0]
        v1 = f1[:, 1]
        du1_dx, = torch.autograd.grad(u1, x, torch.ones_like(u1), create_graph=True)
        dv1_dx, = torch.autograd.grad(v1, x, torch.ones_like(v1), create_graph=True)

        # Re-leaf x at the right edge (+5 = -5 + 10) so the second grad is
        # taken w.r.t. an independent input, not the original leaf.
        x = torch.clone(torch.detach(x))
        x = x + 10
        x = x.requires_grad_()
        f2 = model(torch.stack([x, t], dim=-1))
        u2 = f2[:, 0]
        v2 = f2[:, 1]
        du2_dx, = torch.autograd.grad(u2, x, torch.ones_like(u2), create_graph=True)
        dv2_dx, = torch.autograd.grad(v2, x, torch.ones_like(v2), create_graph=True)

        loss_real = du1_dx - du2_dx
        loss_imaginary = dv1_dx - dv2_dx
        return torch.mean(loss_real ** 2 + loss_imaginary ** 2)

    # All four terms share the same weight of 10.
    return 10 * l1() + 10 * l2() + 10 * l3() + 10 * l4()


def main():
    """Train the PINN indefinitely, checkpointing every 1000 epochs.

    Resumes from the newest checkpoint in ./checkpoints when one exists
    (restoring model weights, optimizer state, and the RNG state so the
    sampled collocation points continue the same stream).  Runs forever;
    stop with Ctrl-C.
    """
    torch.random.manual_seed(1234)
    # All tensors created by _loss land on this device.
    torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

    model = models.get_model()
    optimizer = torch.optim.Adam(model.parameters())

    checkpoint_manager = shared.checkpoint.CheckpointManager(pathlib.Path("./checkpoints"))
    previous_checkpoint = checkpoint_manager.last_existed()
    if previous_checkpoint:
        # map_location="cpu" makes the checkpoint portable: a file saved on a
        # CUDA host otherwise fails to load on a CPU-only machine.
        # load_state_dict copies the values onto the live parameters' device,
        # and set_rng_state requires a CPU byte tensor anyway.
        # NOTE: torch.load unpickles arbitrary objects — only load trusted
        # checkpoint files.
        checkpoint = torch.load(previous_checkpoint[1], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        torch.random.set_rng_state(checkpoint["random"])
    else:
        previous_checkpoint = (0, None)  # sentinel: start from epoch 0

    model.train()
    for epoch in itertools.count(previous_checkpoint[0] + 1):
        optimizer.zero_grad()
        # Coarse curriculum: 10x more collocation points after 100k epochs
        # to refine the solution once training has roughly converged.
        point_count = 1000 if epoch <= 100000 else 10000
        c = _loss(model, point_count)
        c.backward()
        optimizer.step()

        print(f"Epoch {epoch}: {c}")
        if epoch % 1000 == 0:
            torch.save({
                "model": model.state_dict(),
                "random": torch.random.get_rng_state(),
                "optimizer": optimizer.state_dict()
            }, checkpoint_manager.new(epoch))


if __name__ == "__main__":
    # Script entry point: training loops forever; interrupt with Ctrl-C.
    main()
