import os

import torch

from llmpt.neural import GPT
from .model import GPTModel
from .training import GPTTraining, GPTTrainingInitArgs, GPTTrainingResumeArgs


class GPTTrainingSimple(GPTTraining):
    """GPT training backend using a plain Adam optimizer and single-file checkpoints.

    Checkpoints are two files in the checkpoint directory: ``nn.pt`` (model
    state dict) and ``optimizer.pt`` (optimizer state dict).
    """

    nn: GPT                             # the network being trained
    optimizer: torch.optim.Optimizer    # Adam over all network parameters

    # model

    def _init_model(self, args: GPTTrainingInitArgs):
        """Construct the network and optimizer; restore both when resuming.

        If *args* indicates a resume, loads ``nn.pt`` and ``optimizer.pt``
        from ``self.resume_checkpoint_dir_path`` onto ``self.device``.
        """
        self.nn = GPTModel.construct_nn(self.model_hparams, self.device)
        self.optimizer = torch.optim.Adam(self.nn.parameters(), lr=self.training_hparams.learning_rate)

        if isinstance(args, GPTTrainingResumeArgs):
            # NOTE(review): torch.load unpickles arbitrary objects — safe only
            # for checkpoints this code wrote itself; do not point it at
            # untrusted files. (weights_only=True not adopted here to avoid
            # breaking optimizer-state loading — confirm torch version first.)
            nn_file_path = os.path.join(self.resume_checkpoint_dir_path, "nn.pt")
            nn_state = torch.load(nn_file_path, map_location=self.device)
            self.nn.load_state_dict(nn_state)

            optimizer_file_path = os.path.join(self.resume_checkpoint_dir_path, "optimizer.pt")
            optimizer_state = torch.load(optimizer_file_path, map_location=self.device)
            self.optimizer.load_state_dict(optimizer_state)

    def _save_model(self):
        """Write model and optimizer state dicts to the saving checkpoint dir.

        No-op when no training directory is configured.
        """
        if self.training_dir_path is None: return
        os.makedirs(self.saving_checkpoint_dir_path, exist_ok=True)

        nn_file_path = os.path.join(self.saving_checkpoint_dir_path, "nn.pt")
        torch.save(self.nn.state_dict(), nn_file_path)

        optimizer_file_path = os.path.join(self.saving_checkpoint_dir_path, "optimizer.pt")
        torch.save(self.optimizer.state_dict(), optimizer_file_path)

    # neural

    @property
    def gpt(self) -> GPT:
        """The underlying GPT network."""
        return self.nn

    def backward(self, loss: torch.Tensor):
        """Backpropagate *loss* and take one optimizer step.

        Gradients are clipped to a global L2 norm of 1.0 before the step,
        and zeroed afterwards so the next backward starts fresh.
        """
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.nn.parameters(), 1.0, norm_type=2)
        self.optimizer.step()
        self.optimizer.zero_grad()

    def set_learning_rate(self, learning_rate: float):
        """Set the learning rate on every optimizer param group.

        The Adam optimizer built in ``_init_model`` has a single group, so
        this matches the previous single-group behavior while also covering
        optimizers with multiple param groups.
        """
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = learning_rate
