# https://gitee.com/yueyinqiu5990/tj12413601/blob/master/assignment4/question1/main_train.py
# Modified from https://gitee.com/yueyinqiu5990/tj12413601/blob/master/assignment3/question2/main_train.py
import pathlib

import torch
import torch.utils.data

import datasets
import models


def train(model: "models.LorentzDispersionModel",
          epoch_count: int = 10000,
          silent: bool = False,
          dataset: "datasets.EpsDataset | None" = None):
    """Fit *model* to *dataset* with full-batch Adam and an MSE loss.

    Args:
        model: the dispersion model to optimize in place.
        epoch_count: number of full-batch gradient steps to run.
        silent: when True, suppress all progress/parameter printing.
        dataset: training data; a fresh ``datasets.EpsDataset`` is
            created when None (None default avoids a shared mutable
            default argument).

    Returns:
        The loss tensor of the final epoch, or ``float("nan")`` when
        ``epoch_count`` is 0.
    """
    if dataset is None:
        dataset = datasets.EpsDataset()
    # Full-batch training: a single batch holding the entire dataset.
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=len(dataset))
    train_data_x, train_data_y = next(iter(data_loader))

    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(params=model.parameters())

    loss = float("nan")
    for epoch in range(epoch_count):
        optimizer.zero_grad()
        predicted = model(train_data_x)
        loss = criterion(predicted, train_data_y)
        loss.backward()
        optimizer.step()
        if not silent:
            # ":05d" zero-pads the epoch number. The original ":00000"
            # spec parsed as zero-fill with width 0, i.e. no padding.
            print(f"Epoch {epoch:05d}: {loss}")

    if not silent:
        print()
        # Report the fitted physical parameters of the dispersion model.
        print(f"epsilon_infinity={float(model.epsilon_infinity.data):.3e}")
        print(f"omega_p={float(model.omega_p):.3e}")
        print(f"omega_alpha={float(model.omega_alpha):.3e}")
        print(f"omega_c={float(model.omega_c):.3e}")

    return loss


def main():
    """Train a fresh Lorentz dispersion model and save its weights."""
    model = models.LorentzDispersionModel()
    train(model)
    # torch.save does not create missing parent directories and would
    # raise FileNotFoundError, so ensure ./outputs exists first.
    pathlib.Path("./outputs").mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), "./outputs/trained_model.pt")


# Run training only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
