import itertools

import torch
import torch.utils.data

import torch_plus
from checkpoint import Checkpoint
from checkpoint_manager import CheckpointManager
from scaler import Scaler
from simple_pendulum_dataset import SimplePendulumDataset
from spinn import Spinn


def _loss(model: Spinn,
          dataset: SimplePendulumDataset,
          t_min: float,
          t_max: float,
          physics_weight: float,
          data_weight: float,
          random: torch.Generator) -> torch.Tensor:
    """Return the weighted sum of the physics (PDE residual) loss and the
    data-fit loss for the pendulum model.

    The physics term evaluates the pendulum equation
    m*l*theta'' + m*g*sin(theta) + gamma*l*theta' on 1000 collocation
    times sampled uniformly in [t_min, t_max]; the data term is the mean
    squared error against the dataset's observed angles.
    """
    # --- Physics residual on randomly sampled collocation points ---
    t = torch_plus.rand(1000,
                        t_min, t_max,
                        random, True)

    theta = model(t)
    # First and second time derivatives of theta via autograd.
    d_theta_d_t, = torch_plus.grad(
        theta, [t], True, True)
    d_theta_d_t_d_t, = torch_plus.grad(
        d_theta_d_t, [t], True, True)

    residual = (model.m * model.l_ * d_theta_d_t_d_t
                + model.m * model.g * torch.sin(theta)
                + model.gamma * model.l_ * d_theta_d_t)
    physics = torch.mean(residual ** 2)

    # --- Data-fit term: MSE between predictions and observed angles ---
    misfit = model(dataset.t()) - dataset.theta()
    data = torch.mean(misfit ** 2)

    return physics * physics_weight + data * data_weight


def train(checkpoint_manager: CheckpointManager,
          dataset: SimplePendulumDataset,
          model_shape: tuple[int, int] = (300, 3),
          loss_weights: tuple[float, float] = (10, 1000),
          max_epochs: int | None = None):
    """Train a Spinn pendulum model, resuming from the latest checkpoint.

    Args:
        checkpoint_manager: Locates the last saved checkpoint and names
            new ones. NOTE(review): assumed to return a (epoch, checkpoint)
            pair where checkpoint is falsy when none exists — confirm
            against CheckpointManager.
        dataset: Observed pendulum trajectory providing t/theta samples
            and the time range for collocation sampling.
        model_shape: (width, depth) of the Spinn network.
        loss_weights: (physics_weight, data_weight) passed to the loss.
        max_epochs: Stop once the epoch counter exceeds this value.
            None (the default) preserves the original behavior of
            training indefinitely.
    """
    epoch, checkpoint = checkpoint_manager.last_existed_loaded(Checkpoint.load)

    t_min = float(dataset.t_min())
    t_max = float(dataset.t_max())

    # Seeded generator for reproducible collocation sampling; restore its
    # state when resuming so the random stream continues where it left off.
    random = torch.Generator()
    random.manual_seed(1234)
    if checkpoint:
        random.set_state(checkpoint.random_state())

    model = Spinn(model_shape[0], model_shape[1], Scaler(t_min, t_max))
    if checkpoint:
        model.load_state_dict(checkpoint.model_state())
    else:
        # Mass and length come from the problem definition; only g and
        # gamma are learned, so m and l_ are frozen below.
        model.m.data = torch_plus.as_tensor(dataset.problem().m())
        model.l_.data = torch_plus.as_tensor(dataset.problem().l_())
    model.m.requires_grad_(False)
    model.l_.requires_grad_(False)

    optimizer = torch.optim.Adam(model.parameters())
    if checkpoint:
        optimizer.load_state_dict(checkpoint.optimizer_state())

    model.train()

    for epoch in itertools.count(epoch + 1):
        if max_epochs is not None and epoch > max_epochs:
            break

        optimizer.zero_grad()
        loss = _loss(model, dataset, t_min, t_max, loss_weights[0], loss_weights[1], random)
        loss.backward()
        optimizer.step()

        print(f"Epoch {epoch}: g={float(model.g):.2e} gamma={float(model.gamma):.2e} loss={loss}")
        # Persist model, optimizer, and RNG state every 1000 epochs so a
        # restart resumes the identical training trajectory.
        if epoch % 1000 == 0:
            checkpoint = Checkpoint(
                model.state_dict(),
                random.get_state(),
                optimizer.state_dict()
            )
            checkpoint.save(checkpoint_manager.new(epoch))
