import os
from collections import namedtuple
from dataclasses import dataclass

import deepspeed
import torch
import torch.distributed
from deepspeed import DeepSpeedEngine
from zkl_llmpt_datasets import SimpleVocab
from zkl_llmpt_iterator import VocabForNLP
from zkl_serialization import load_and_parse_json

from llmpt.neural import GPT
from .model import GPTModel, GPTModelHparams
from .training import GPTTraining, GPTTrainingCreateArgs, GPTTrainingHparams, GPTTrainingResumeArgs
from .utils import torch_distributed_is_rank0

# Minimal argparse-style namespace accepted by deepspeed.initialize(); only the
# `deepspeed_config` attribute is used (here it carries an inline config dict).
DeepSpeedArgs = namedtuple('DeepSpeedArgs', 'deepspeed_config')


@dataclass(kw_only=True)
class GPTTrainingDeepspeedCreateArgs(GPTTrainingCreateArgs):
    """Arguments for creating a fresh DeepSpeed-backed training run."""
    # NOTE(review): `compile` is not visibly consumed anywhere in this file —
    # confirm it is honored elsewhere (e.g. via torch.compile in the base class).
    compile: bool = True
    # Enables bf16 mixed precision in the DeepSpeed config (see construct_engine).
    bf16: bool = True


@dataclass(kw_only=True)
class GPTTrainingDeepspeedResumeArgs(GPTTrainingResumeArgs):
    """Arguments for resuming a DeepSpeed-backed training run from a checkpoint."""
    # NOTE(review): `compile` is not visibly consumed anywhere in this file —
    # confirm it is honored elsewhere (e.g. via torch.compile in the base class).
    compile: bool = True
    # Enables bf16 mixed precision in the DeepSpeed config (see construct_engine).
    bf16: bool = True


# Union accepted by GPTTrainingDeepspeed.__init__: create-from-scratch or resume.
GPTTrainingDeepspeedInitArgs = GPTTrainingDeepspeedCreateArgs | GPTTrainingDeepspeedResumeArgs


class GPTTrainingDeepspeed(GPTTraining):
    """GPTTraining variant that delegates optimization, mixed precision and
    model checkpointing to a DeepSpeed ZeRO stage-2 engine.

    Plain file-system side effects (hparams / vocab / iterator / checkpoint
    bookkeeping) are restricted to rank 0 to avoid concurrent writes; the
    DeepSpeed engine's own checkpoint save/load calls run on every rank
    (they are collective operations — presumably required by DeepSpeed;
    TODO confirm against the DeepSpeed checkpointing docs).
    """

    # Wraps the GPT nn; owns the optimizer, bf16 state and ZeRO partitioning.
    engine: DeepSpeedEngine

    def __init__(self, args: GPTTrainingDeepspeedInitArgs):
        super().__init__(args)

    # rank0-only io

    def _save_hparams(self):
        # Only rank 0 writes shared files; other ranks would race on the same path.
        if torch_distributed_is_rank0():
            super()._save_hparams()

    def _init_datasets(self, args: GPTTrainingDeepspeedInitArgs):
        if torch_distributed_is_rank0():
            super()._init_datasets(args)

    def _save_vocab(self):
        if torch_distributed_is_rank0():
            super()._save_vocab()

    def _save_iterator(self):
        if torch_distributed_is_rank0():
            super()._save_iterator()

    def _save_checkpoint(self):
        if torch_distributed_is_rank0():
            super()._save_checkpoint()

    def _clean_checkpoints(self):
        if torch_distributed_is_rank0():
            super()._clean_checkpoints()

    # model

    @classmethod
    def construct_engine(cls, *,
        model_hparams: GPTModelHparams,
        training_hparams: GPTTrainingHparams,
        device: torch.device | str | None = 'cuda',
        bf16: bool = True,
    ) -> DeepSpeedEngine:
        """Build the GPT nn and wrap it in a DeepSpeed engine.

        Returns only the engine; the optimizer/scheduler handles produced by
        deepspeed.initialize() are discarded (the optimizer stays reachable
        via ``engine.optimizer``).
        """
        nn = GPTModel.construct_nn(model_hparams, device)
        # Dummy forward with a single int64 token — presumably to materialize
        # lazily-created parameters before DeepSpeed partitions them;
        # TODO confirm this warm-up is still required.
        nn(torch.zeros([1, 1], dtype=torch.int64, device=device))

        # Inline DeepSpeed config: ZeRO stage 2, AdamW, gradient clipping at 1.0.
        deepspeed_args = DeepSpeedArgs({
            "bf16": {"enabled": bool(bf16)},
            "zero_optimization": {"stage": 2},
            "train_micro_batch_size_per_gpu": training_hparams.batch_samples_n,
            "optimizer": {"type": "AdamW", "params": {"lr": training_hparams.learning_rate}},
            "gradient_clipping": 1.0})
        engine, _, _, _ = deepspeed.initialize(deepspeed_args, model=nn)

        return engine

    def _init_model(self, args: GPTTrainingDeepspeedInitArgs):
        # Dispatch through type(self) (was hard-coded GPTTrainingDeepspeed) so
        # subclasses overriding construct_engine are honored here too.
        self.engine = type(self).construct_engine(
            model_hparams=args.model_hparams,
            training_hparams=args.training_hparams,
            device=args.device, bf16=args.bf16)

        if isinstance(args, GPTTrainingResumeArgs):
            # Runs on every rank — DeepSpeed checkpoint loading is collective.
            self.engine.load_checkpoint(self.checkpoints_dir_path, str(self.resume_checkpoint_tokens_n))

    def _save_model(self):
        # No training directory configured — nothing to persist.
        if self.training_dir_path is None: return
        # Checkpoint tag is the token count at the time of saving.
        self.engine.save_checkpoint(self.checkpoints_dir_path, str(self.progress_tokens_n))

    # neural

    @property
    def gpt(self) -> GPT:
        """The engine doubles as the GPT module for forward passes."""
        # noinspection PyTypeChecker
        return self.engine

    def backward(self, loss: torch.Tensor):
        """One optimization step: backward, optimizer step, gradient reset —
        all delegated to the DeepSpeed engine (which applies gradient clipping
        per the config above)."""
        self.engine.backward(loss)
        self.engine.step()
        self.engine.zero_grad()

    def set_learning_rate(self, learning_rate: float):
        """Set the learning rate on the engine's optimizer.

        Fix: update every param group, not only ``param_groups[0]`` — with
        more than one group the original left the others at a stale LR.
        """
        for param_group in self.engine.optimizer.param_groups:
            param_group['lr'] = learning_rate


def extract_model_from_deepspeed_checkpoint(*,
    training_dir_path: str | None = None,
    checkpoint_tokens_n: int | None = None,
    device: torch.device | str | None = None
) -> GPTModel:
    """Rebuild a plain (engine-free) GPTModel from a DeepSpeed training dir.

    Reads ``model_hparams.json`` and ``vocab.csv`` from *training_dir_path*,
    then loads the rank-0 module state from the selected checkpoint under
    ``<training_dir_path>/checkpoints/<tokens_n>/mp_rank_00_model_states.pt``.

    Args:
        training_dir_path: training directory to read from (required).
        checkpoint_tokens_n: checkpoint tag to load; None lets
            GPTTraining.select_checkpoint pick one.
        device: target device for construction and state loading.

    Raises:
        ValueError: if *training_dir_path* is None.
    """
    if training_dir_path is None:
        # Fail fast with a clear message instead of an opaque TypeError
        # from os.path.join(None, ...) below.
        raise ValueError("training_dir_path is required")

    checkpoint_tokens_n = GPTTraining.select_checkpoint(training_dir_path, checkpoint_tokens_n)
    checkpoint_dir_path = os.path.join(training_dir_path, "checkpoints", str(checkpoint_tokens_n))

    model_hparams_file_path = os.path.join(training_dir_path, 'model_hparams.json')
    model_hparams = load_and_parse_json(model_hparams_file_path, GPTModelHparams)

    vocab_file_path = os.path.join(training_dir_path, 'vocab.csv')
    vocab = SimpleVocab.load_from_file(vocab_file_path)
    vocab = VocabForNLP(vocab)

    nn = GPTModel.construct_nn(hparams=model_hparams, device=device)
    nn_state_file_path = os.path.join(checkpoint_dir_path, "mp_rank_00_model_states.pt")
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load checkpoints from trusted sources. DeepSpeed state
    # files carry non-tensor metadata, so weights_only=True may not be usable
    # here; verify before tightening.
    nn_state = torch.load(nn_state_file_path, map_location=device or torch.get_default_device())
    # DeepSpeed stores the wrapped module's state_dict under the 'module' key.
    nn.load_state_dict(nn_state['module'])

    return GPTModel(hparams=model_hparams, vocab=vocab, nn=nn)
