import os
from collections import namedtuple

import deepspeed
import torch
import torch.distributed
from zkl_serialization import load_and_parse_json

from llmpt.preprocess import SimpleVocab, TokenizedDataset
from .model import GPTModel, VocabForNLP
from .training import GPTTraining

# Minimal stand-in for the argparse namespace that deepspeed.initialize()
# expects as its first argument — presumably only the ``deepspeed_config``
# attribute (a config dict here) is read; see the initialize() call below.
DeepSpeedArgs = namedtuple('DeepSpeedArgs', 'deepspeed_config')


class GPTTrainingDeepspeed(GPTTraining):
    """GPT training loop backed by a DeepSpeed engine (ZeRO stage 2).

    Extends :class:`GPTTraining`: model construction and the train loop come
    from the base class, while the optimizer, backward pass, gradient
    clipping, bf16 and checkpoint (de)serialization are delegated to a
    ``deepspeed`` engine built in ``__init__``.
    """

    def __init__(self, *,
        # path
        path: str | None = None,

        # resuming
        resume_path: str | None = None,
        resume_checkpoint_tokens_n: int | None = None,

        # hyperparams
        hyperparams: GPTTraining.Hyperparams | None = None,

        # dataset
        train_dataset: TokenizedDataset,
        valid_dataset: TokenizedDataset | None = None,

        # device
        device: torch.device | str | None = None,
        compile: bool = True,
        bf16: bool = True,
    ):
        """Build the training state and the DeepSpeed engine.

        When ``resume_path`` is given, the checkpoint identified by
        ``resume_checkpoint_tokens_n`` (or the one chosen by
        ``_select_checkpoint`` when ``None``) is loaded into the engine.
        ``bf16`` toggles DeepSpeed's bfloat16 mode.
        """
        if resume_path is None:
            resume_checkpoints_dir_path = None
            resume_checkpoint_tokens_n = None
        else:
            resume_checkpoints_dir_path = os.path.join(resume_path, 'checkpoints')
            # Resolve the concrete checkpoint id before the base class
            # consumes it (e.g. to restore progress counters).
            resume_checkpoint_tokens_n = self._select_checkpoint(
                resume_checkpoints_dir_path, resume_checkpoint_tokens_n)

        super().__init__(
            path=path,
            resume_path=resume_path,
            resume_checkpoint_tokens_n=resume_checkpoint_tokens_n,
            hyperparams=hyperparams,
            train_dataset=train_dataset,
            valid_dataset=valid_dataset,
            device=device,
            compile=compile)

        # engine & optimizer
        nn = GPTModel.construct_nn(self.hyperparams.model, self.device)
        # Warm-up forward with a dummy token so any lazily-created
        # parameters/buffers exist before DeepSpeed wraps the module.
        # NOTE(review): assumed purpose — confirm against GPTModel internals.
        nn(torch.zeros([1, 1], dtype=torch.int64, device=self.device))

        deepspeed_args = DeepSpeedArgs({
            "bf16": {"enabled": bool(bf16)},
            "zero_optimization": {"stage": 2},
            "optimizer": {"type": "AdamW", "params": {"lr": self.hyperparams.learning_rate}},
            "gradient_clipping": 1.0,
            "train_micro_batch_size_per_gpu": self.hyperparams.batch_samples_n
        })
        self.engine, self.optimizer, _, _ = deepspeed.initialize(deepspeed_args, model=nn)

        if resume_path is not None:
            # DeepSpeed checkpoint tags are strings; we tag by token count.
            self.engine.load_checkpoint(resume_checkpoints_dir_path, str(resume_checkpoint_tokens_n))

    # resuming

    def _save_checkpoint(self):
        """Save base-class state, then the engine state (model + optimizer).

        The engine checkpoint is tagged with the current token count so it
        pairs with ``_select_checkpoint`` / ``load_checkpoint``.
        """
        super()._save_checkpoint()

        checkpoints_dir_path = os.path.join(self.path, 'checkpoints')

        self.engine.save_checkpoint(checkpoints_dir_path, str(self.progress_tokens_n))

    def _clean_checkpoints(self):
        """Delete stale checkpoints, on rank 0 only.

        All ranks share the checkpoint directory, so only one process may
        delete files.
        """
        # BUG FIX: torch.distributed.get_rank() raises RuntimeError (not
        # ValueError) when the default process group is not initialized, so
        # the original ``except ValueError`` fallback never triggered and
        # single-process runs crashed here. Probe the state explicitly.
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            process_rank = torch.distributed.get_rank()
        else:
            process_rank = 0

        if process_rank == 0:
            super()._clean_checkpoints()

    @classmethod
    def load_model_from_checkpoint(cls, *,
        training_dir_path: str | None = None,
        checkpoint_tokens_n: int | None = None,
        device: torch.device | str | None = None
    ) -> GPTModel:
        """Rebuild a :class:`GPTModel` from a DeepSpeed training directory.

        Reads hyperparams and vocab from ``training_dir_path``, then loads
        the module weights from the selected checkpoint's consolidated
        ``mp_rank_00_model_states.pt`` file (single model-parallel rank).

        Raises:
            ValueError: if ``training_dir_path`` is not provided.
        """
        # Fail fast with a clear message instead of a TypeError from
        # os.path.join(None, ...) on the declared default.
        if training_dir_path is None:
            raise ValueError('training_dir_path is required')

        checkpoints_dir_path = os.path.join(training_dir_path, 'checkpoints')
        checkpoint_tokens_n = cls._select_checkpoint(checkpoints_dir_path, checkpoint_tokens_n)
        checkpoint_dir_path = os.path.join(checkpoints_dir_path, str(checkpoint_tokens_n))

        hyperparams_file_path = os.path.join(training_dir_path, 'hyperparams.json')
        hyperparams = load_and_parse_json(hyperparams_file_path, GPTTraining.Hyperparams)
        # Only the model sub-config is needed to reconstruct the network.
        hyperparams = hyperparams.model

        vocab_file_path = os.path.join(training_dir_path, 'vocab.csv')
        vocab = SimpleVocab.load_from_file(vocab_file_path)
        vocab = VocabForNLP(vocab)

        nn = GPTModel.construct_nn(hyperparams=hyperparams, device=device)
        nn_state_file_path = os.path.join(checkpoint_dir_path, "mp_rank_00_model_states.pt")
        nn_state = torch.load(nn_state_file_path, map_location=device or torch.get_default_device())
        # DeepSpeed stores the wrapped module's weights under 'module'.
        nn.load_state_dict(nn_state['module'])

        return GPTModel(hyperparams=hyperparams, vocab=vocab, nn=nn)

    # neural

    def forward_logits(self, in_tokens: torch.Tensor, in_positions: torch.Tensor) -> torch.Tensor:
        """Run the engine forward and return per-token logits.

        The module also returns a second value, which is discarded here.
        """
        out_tokens_logits, _ = self.engine.forward(
            in_tokens, in_positions,
            ff_dropout=self.hyperparams.ff_dropout,
            at_dropout=self.hyperparams.at_dropout)
        return out_tokens_logits

    def backward(self, loss: torch.Tensor):
        """Backward + optimizer step + gradient reset, all via the engine.

        DeepSpeed owns gradient accumulation/clipping, so the three calls
        replace the usual manual loss.backward()/optimizer.step() pair.
        """
        self.engine.backward(loss)
        self.engine.step()
        self.engine.zero_grad()

    def set_learning_rate(self, learning_rate: float):
        """Set the learning rate on every optimizer param group.

        FIX: the original updated only ``param_groups[0]``; DeepSpeed-built
        optimizers may split parameters into several groups (e.g. decay /
        no-decay), all of which must follow the schedule. Identical behavior
        when there is a single group.
        """
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = learning_rate
