import os

import torch

from llmpt.preprocess import TokenizedDataset
from .model import GPTModel
from .training import GPTTraining


class GPTTrainingSimple(GPTTraining):
    """Single-device GPT training driver using a plain Adam optimizer.

    Constructs the model network and optimizer from the resolved
    hyperparameters, optionally restoring both from a saved checkpoint,
    and implements the neural hooks (`forward_logits`, `backward`,
    `set_learning_rate`) and checkpointing used by the `GPTTraining`
    base class.
    """

    def __init__(self, *,
        # path
        path: str | None = None,

        # resuming
        resume_path: str | None = None,
        resume_checkpoint_tokens_n: int | None = None,

        # hyperparams
        hyperparams: GPTTraining.Hyperparams | None = None,

        # dataset
        train_dataset: TokenizedDataset,
        valid_dataset: TokenizedDataset | None = None,

        # device
        device: torch.device | str | None = None,
        compile: bool = False,
    ):
        """Build (or resume) the training state.

        When `resume_path` is given, the checkpoint to load is resolved
        via `_select_checkpoint` (NOTE(review): presumably defined on the
        base class and callable before `super().__init__` — confirm), and
        the model/optimizer states are restored from that checkpoint's
        `nn.pt` / `optimizer.pt` files after construction.
        """
        if resume_path is None:
            resume_checkpoint_tokens_n = None
            resume_checkpoint_dir_path = None
        else:
            # Resolve the concrete checkpoint directory:
            # <resume_path>/checkpoints/<tokens_n>
            resume_checkpoints_dir_path = os.path.join(resume_path, 'checkpoints')
            resume_checkpoint_tokens_n = self._select_checkpoint(resume_checkpoints_dir_path, resume_checkpoint_tokens_n)
            resume_checkpoint_dir_path = os.path.join(resume_checkpoints_dir_path, str(resume_checkpoint_tokens_n))

        super().__init__(
            path=path,
            resume_path=resume_path,
            resume_checkpoint_tokens_n=resume_checkpoint_tokens_n,
            hyperparams=hyperparams,
            train_dataset=train_dataset,
            valid_dataset=valid_dataset,
            device=device,
            compile=compile)

        # nn & optimizer
        self.nn = GPTModel.construct_nn(self.hyperparams.model, self.device)
        self.optimizer = torch.optim.Adam(
            self.nn.parameters(),
            lr=self.hyperparams.learning_rate)

        if resume_path is not None:
            # Restore model and optimizer state saved by _save_checkpoint.
            # map_location keeps the loaded tensors on the training device.
            nn_file_path = os.path.join(resume_checkpoint_dir_path, "nn.pt")
            nn_state = torch.load(nn_file_path, map_location=self.device)
            self.nn.load_state_dict(nn_state)

            optimizer_file_path = os.path.join(resume_checkpoint_dir_path, "optimizer.pt")
            optimizer_state = torch.load(optimizer_file_path, map_location=self.device)
            self.optimizer.load_state_dict(optimizer_state)

    # resuming

    def _save_checkpoint(self):
        """Persist model and optimizer state under
        <path>/checkpoints/<progress_tokens_n>/, extending the base-class
        checkpointing."""
        super()._save_checkpoint()

        checkpoints_dir_path = os.path.join(self.path, 'checkpoints')
        checkpoint_dir_path = os.path.join(checkpoints_dir_path, str(self.progress_tokens_n))
        os.makedirs(checkpoint_dir_path, exist_ok=True)

        nn_file_path = os.path.join(checkpoint_dir_path, "nn.pt")
        torch.save(self.nn.state_dict(), nn_file_path)

        optimizer_file_path = os.path.join(checkpoint_dir_path, "optimizer.pt")
        torch.save(self.optimizer.state_dict(), optimizer_file_path)

    # neural

    def forward_logits(self, in_tokens: torch.Tensor, in_positions: torch.Tensor) -> torch.Tensor:
        """Run the model forward pass and return the token logits.

        The model also returns a second value which is discarded here
        (NOTE(review): presumably attention/cache state — confirm against
        GPTModel).
        """
        out_tokens_logits, _ = self.nn.forward(
            in_tokens, in_positions,
            ff_dropout=self.hyperparams.ff_dropout,
            at_dropout=self.hyperparams.at_dropout)
        return out_tokens_logits

    def backward(self, loss: torch.Tensor):
        """Backpropagate `loss`, clip the global grad norm to 1.0, and
        apply one optimizer step."""
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.nn.parameters(), 1.0, norm_type=2)
        self.optimizer.step()
        self.optimizer.zero_grad()

    def set_learning_rate(self, learning_rate: float):
        """Set the learning rate on the optimizer.

        Iterates all param groups for robustness; the Adam optimizer
        constructed in __init__ currently has exactly one group, so this
        is behavior-identical to writing param_groups[0].
        """
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = learning_rate
