import abc
import os
import shutil
import sys
from dataclasses import dataclass

import numpy as np
import torch
from zkl_serialization import dump_and_save_json, load_and_parse_json
from zkl_training import ProcessingTask

from llmpt.preprocess import SimpleVocab, TokenizedDataset
from .iterator import PreprocessedDatasetIterator
from .model import GPTModel
from .plugin_checkpoint import CheckpointPlugin
from .plugin_metrics import MetricsPlugin
from .plugin_summary import SummaryPlugin
from .plugin_tqdm import TqdmPlugin
from .progress import ProgressRecorder
from .random_offset import apply_random_offset
from .validating import GPTValidating
from .vocab import VocabForNLP

# One training batch: (tokens_in_wid, tokens_in_pos, tokens_out_wid, tokens_out_mask, progress_tokens_n).
Input = tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
# One step's result: (out_tokens_ce, out_tokens_acc, tokens_out_mask, progress_tokens_n).
Output = tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]


class GPTTraining(ProcessingTask, abc.ABC):
    """Training task for a GPT language model.

    Responsibilities visible here: persisting/restoring hyperparams and
    vocab, iterating the tokenized training dataset, checkpoint selection,
    saving and pruning, learning-rate scheduling, and the per-step
    forward/backward. Subclasses provide the model forward pass
    (`forward_logits`), the optimizer step (`backward`) and
    `set_learning_rate`.
    """

    @dataclass(kw_only=True)
    class Hyperparams:
        # Optional human-readable run name.
        name: str | None = None

        model: GPTModel.Hyperparams

        batch_samples_n: int
        context_tokens_n: int
        # None means "same as context_tokens_n" (non-overlapping windows);
        # resolved in __post_init__.
        striding_tokens_n: int | None = None

        learning_rate: float
        # Linear warmup length and exponential-decay time constant,
        # both in tokens; see GPTTraining._before_step.
        warmup_tokens_n: int = int(100e6)
        decay_tokens_n: int = int(100e6)

        # Training budget: stop after this many tokens and/or dataset repeats.
        train_tokens_n: int | None = None
        train_repeats_n: int | None = None
        valid_tokens_n: int | None = int(1e6)
        summary_tokens_n: int | None = int(20e6)

        ff_dropout: float | None = None
        at_dropout: float | None = None

        def __post_init__(self):
            if self.striding_tokens_n is None:
                self.striding_tokens_n = self.context_tokens_n
            # With no explicit budget at all, default to one pass over the data.
            if self.train_repeats_n is None and self.train_tokens_n is None:
                self.train_repeats_n = 1

    def __init__(self, *,
        # path
        path: str | None = None,

        # resuming
        resume_path: str | None = None,
        resume_checkpoint_tokens_n: int | None = None,

        # hyperparams
        hyperparams: Hyperparams | None = None,

        # dataset
        train_dataset: TokenizedDataset,
        valid_dataset: TokenizedDataset | None = None,

        # device
        device: torch.device | str | None = None,
        compile: bool = False,
    ):
        """Set up a (possibly resumed) training run.

        :param path: output directory for hyperparams/vocab/checkpoints;
            None disables persistence.
        :param resume_path: directory of a previous run to resume from.
        :param resume_checkpoint_tokens_n: checkpoint to resume from,
            identified by its progress token count; None selects the latest.
        :param hyperparams: required for a fresh run; when resuming, loaded
            from resume_path if omitted.
        :param compile: wrap forward_logits/forward_metrics with
            torch.compile before running.
        :raises ValueError: if hyperparams cannot be determined, or the
            requested resume checkpoint does not exist.
        """
        super().__init__()

        # path
        if path is not None:
            os.makedirs(path, exist_ok=True)
        self.path = path

        # resuming: resolve which checkpoint directory to restore from
        if resume_path is None:
            resume_checkpoint_tokens_n = None
            resume_checkpoint_dir_path = None
        else:
            resume_checkpoints_dir_path = os.path.join(resume_path, 'checkpoints')
            resume_checkpoint_tokens_n = self._select_checkpoint(resume_checkpoints_dir_path, resume_checkpoint_tokens_n)
            resume_checkpoint_dir_path = os.path.join(resume_checkpoints_dir_path, str(resume_checkpoint_tokens_n))

        # hyperparams: explicit argument wins over the restored file;
        # re-saved when writing to a directory different from resume_path
        if resume_path is None:
            if hyperparams is None:
                raise ValueError("Failed to determine hyperparams!")
        else:
            if hyperparams is None:
                hyperparams_file_path = os.path.join(resume_path, 'hyperparams.json')
                hyperparams = load_and_parse_json(hyperparams_file_path, GPTTraining.Hyperparams)
        if path is not None and path != resume_path:
            hyperparams_file_path = os.path.join(path, 'hyperparams.json')
            dump_and_save_json(hyperparams, hyperparams_file_path)
        self.hyperparams = hyperparams

        # dataset
        self.train_dataset = train_dataset
        self.valid_dataset = valid_dataset

        # vocab
        if resume_path is None:
            vocab = train_dataset.vocab
            # Leave room in the model's vocab table for the special tokens.
            vocab = vocab.truncate(hyperparams.model.vocab_size - len(VocabForNLP.specials))
        else:
            # BUGFIX: the saved vocab lives in the resumed run's directory
            # (resume_path), mirroring the hyperparams handling above.
            # Previously this read from `path`, which crashes when path is
            # None and reads a not-yet-written file when path != resume_path.
            vocab_file_path = os.path.join(resume_path, 'vocab.csv')
            vocab = SimpleVocab.load_from_file(vocab_file_path)
        if path is not None and path != resume_path:
            vocab_file_path = os.path.join(path, 'vocab.csv')
            vocab.save_to_file(vocab_file_path)
        self.vocab = VocabForNLP(vocab)

        # device
        self.device = device
        self.compile = compile

        # iterator: on resume, skip the documents consumed before the checkpoint
        iterator_kwargs = dict(
            dataset=self.train_dataset, vocab=self.vocab,
            limit_repeats_n=self.hyperparams.train_repeats_n,
            context_tokens_n=self.hyperparams.context_tokens_n,
            striding_tokens_n=self.hyperparams.striding_tokens_n,
            batch_samples_n=self.hyperparams.batch_samples_n,
            device=self.device)
        if resume_path is not None:
            iterator_file_path = os.path.join(resume_checkpoint_dir_path, "iterator.json")
            iterator_kwargs['skip_docs_n'] = load_and_parse_json(iterator_file_path)['iterated_docs_n']
        self.iterator = PreprocessedDatasetIterator(**iterator_kwargs)

        # progress
        if resume_path is None:
            self.progress_recorder = ProgressRecorder()
        else:
            self.progress_recorder = ProgressRecorder(resume_checkpoint_tokens_n)

        # plugins
        self.install(TqdmPlugin())
        self.install(MetricsPlugin())
        self.install(SummaryPlugin(lambda training: GPTValidating.from_training(training).run()))
        self.install(CheckpointPlugin())

    @property
    def progress_tokens_n(self) -> int:
        """Total tokens processed so far (restored on resume)."""
        return self.progress_recorder.get()

    # resuming

    def save_checkpoint(self):
        """Persist a checkpoint for the current progress, then prune old ones."""
        self._save_checkpoint()
        self._clean_checkpoints()

    def _save_checkpoint(self):
        # Checkpoint directories are named by their progress token count.
        # NOTE(review): requires self.path to be set — presumably only
        # invoked by CheckpointPlugin when persistence is enabled; confirm.
        checkpoints_dir_path = os.path.join(self.path, 'checkpoints')
        checkpoint_dir_path = os.path.join(checkpoints_dir_path, str(self.progress_tokens_n))
        os.makedirs(checkpoint_dir_path, exist_ok=True)

        # Record how far the dataset iterator has advanced so a resumed run
        # can skip the already-consumed documents.
        iterator_file_path = os.path.join(checkpoint_dir_path, "iterator.json")
        dump_and_save_json({'iterated_docs_n': self.iterator.iterated_docs_n}, iterator_file_path)

    def _clean_checkpoints(self):
        """Delete checkpoints not retained by any rule.

        Retention: the 3 most recent checkpoints, plus the first checkpoint
        at or past each 500M-token boundary for long-term history.
        """
        def keep_by_interval_rule(interval: int | float):
            # Keep the first checkpoint at or past each multiple of `interval`.
            def rule(checkpoints_progress_tokens_n: list[int]):
                sorted_indices = np.argsort(checkpoints_progress_tokens_n)
                step = interval
                keep_indices = []
                for index in sorted_indices:
                    checkpoint_step = checkpoints_progress_tokens_n[index]
                    if checkpoint_step >= step:
                        keep_indices.append(index)
                        # Advance past every boundary this checkpoint covers,
                        # so a large gap does not keep multiple checkpoints.
                        while checkpoint_step >= step:
                            step += interval

                checkpoints_keep = np.zeros(len(checkpoints_progress_tokens_n), dtype=bool)
                checkpoints_keep[keep_indices] = True
                return checkpoints_keep

            return rule

        def keep_last_n_rule(n: int):
            # Keep the n checkpoints with the highest progress.
            def rule(checkpoints_progress_tokens_n: list[int]):
                sorted_indices = np.argsort(checkpoints_progress_tokens_n)
                keep_indices = sorted_indices[-n:]
                checkpoints_keep = np.zeros(len(checkpoints_progress_tokens_n), dtype=bool)
                checkpoints_keep[keep_indices] = True
                return checkpoints_keep

            return rule

        keep_rules = [keep_last_n_rule(3), keep_by_interval_rule(500e6)]

        checkpoints_dir_path = os.path.join(self.path, 'checkpoints')

        # Checkpoint directories are named by their progress token count;
        # ignore any other entries.
        checkpoints_progress_tokens_n = []
        for checkpoint_name in os.listdir(checkpoints_dir_path):
            if checkpoint_name.isdigit():
                checkpoints_progress_tokens_n.append(int(checkpoint_name))

        # A checkpoint survives if ANY rule wants to keep it.
        keep_rules_checkpoints_keep = [rule(checkpoints_progress_tokens_n) for rule in keep_rules]
        checkpoints_keep = np.any(keep_rules_checkpoints_keep, axis=0)
        for progress_tokens_n, keep in zip(checkpoints_progress_tokens_n, checkpoints_keep):
            if not keep:
                checkpoint_path = os.path.join(checkpoints_dir_path, str(progress_tokens_n))
                shutil.rmtree(checkpoint_path, ignore_errors=True)
                print(f"Removed {checkpoint_path}", file=sys.stderr)

    @classmethod
    def _select_checkpoint(cls, checkpoints_dir_path: str, progress_tokens_n: int | None = None) -> int:
        """Resolve and validate a checkpoint to resume from.

        :param progress_tokens_n: requested checkpoint (progress token
            count); None selects the latest available.
        :raises ValueError: if no checkpoint exists, or the chosen one is
            missing or not a directory.
        """
        if progress_tokens_n is None:
            available = [
                int(checkpoint_name)
                for checkpoint_name in os.listdir(checkpoints_dir_path)
                if checkpoint_name.isdigit()]
            if not available:
                raise ValueError(f"No checkpoints found in {checkpoints_dir_path}")
            progress_tokens_n = max(available)
        assert isinstance(progress_tokens_n, int)

        checkpoint_dir_path = os.path.join(checkpoints_dir_path, str(progress_tokens_n))
        if not os.path.exists(checkpoint_dir_path):
            raise ValueError(f"Checkpoint {progress_tokens_n} not found in {checkpoints_dir_path}")
        if not os.path.isdir(checkpoint_dir_path):
            raise ValueError(f"Checkpoint {progress_tokens_n} is not a directory in {checkpoints_dir_path}")
        return progress_tokens_n

    # task

    def _before_run(self):
        super()._before_run()

        if self.compile:
            # Compile the hot paths once, before the first step.
            self.forward_logits = torch.compile(self.forward_logits)
            self.forward_metrics = torch.compile(self.forward_metrics)

    def _before_step(self):
        # early stop once the training token budget is exhausted
        if self.hyperparams.train_tokens_n is not None:
            if self.progress_tokens_n >= self.hyperparams.train_tokens_n:
                raise StopIteration

        # dynamic learning rate: linear warmup to the base rate, then decay
        # toward 10% of it, with the excess halving every decay_tokens_n
        if self.progress_tokens_n <= self.hyperparams.warmup_tokens_n:
            k = min(1.0, self.progress_tokens_n / self.hyperparams.warmup_tokens_n)
        else:
            k = (self.progress_tokens_n - self.hyperparams.warmup_tokens_n) / self.hyperparams.decay_tokens_n
            k = np.exp2(-k)
            k = 0.1 + 0.9 * k
        self.set_learning_rate(self.hyperparams.learning_rate * k)

        super()._before_step()

    def _next(self) -> Input:
        """Fetch the next training batch from the dataset iterator."""
        return next(self.iterator)

    def _process(self, input: Input) -> Output:
        """Run one training step on a batch and record its token count."""
        tokens_in_wid, tokens_in_pos, tokens_out_wid, tokens_out_mask, progress_tokens_n = input

        # forward, backward
        loss, out_tokens_ce, out_tokens_acc = self.forward(tokens_in_wid, tokens_in_pos, tokens_out_wid, tokens_out_mask)
        self.backward(loss)

        # progress
        self.progress_recorder.increase(progress_tokens_n)

        return out_tokens_ce, out_tokens_acc, tokens_out_mask, progress_tokens_n

    # neural

    def forward(self,
        tokens_in_wid: torch.Tensor,
        tokens_in_pos: torch.Tensor,
        tokens_out_wid: torch.Tensor,
        tokens_out_mask: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute loss and per-token metrics for one batch.

        Positions are cast to float64 and randomly offset each call, with
        the offset period derived from the model's positional encoding size.
        """
        tokens_in_pos = tokens_in_pos.to(dtype=torch.float64, device=tokens_in_wid.device)
        tokens_in_pos = apply_random_offset(tokens_in_pos, period=2 ** (self.hyperparams.model.pos_size // 2))
        tokens_out_logits = self.forward_logits(tokens_in_wid, tokens_in_pos)
        loss, tokens_ce, tokens_acc = self.forward_metrics(tokens_out_logits, tokens_out_wid, tokens_out_mask)
        return loss, tokens_ce, tokens_acc

    @abc.abstractmethod
    def forward_logits(self,
        in_tokens: torch.Tensor,
        in_positions: torch.Tensor,
    ) -> torch.Tensor:
        """Model forward pass: token ids + positions -> output logits."""
        pass

    def forward_metrics(self,
        tokens_out_logits: torch.Tensor,
        tokens_out_wid: torch.Tensor,
        tokens_out_mask: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute the masked training loss and detached per-token metrics.

        Returns (loss, tokens_ce, tokens_acc); tokens_ce/tokens_acc are
        detached float32 tensors of shape [batch_size, context_size].
        """
        tokens_out_probs = torch.softmax(tokens_out_logits, dim=-1)
        # [batch_size, context_size, vocab_size]

        # cross_entropy expects the class dim at index 1, hence the swap
        tokens_ce = torch.nn.functional.cross_entropy(
            torch.swapaxes(tokens_out_logits, -1, -2),
            tokens_out_wid, reduction='none')
        # [batch_size, context_size]

        # probability assigned to the reference token (a soft "accuracy")
        tokens_acc = torch.gather(tokens_out_probs, dim=-1, index=tokens_out_wid.unsqueeze(-1)).squeeze(-1)
        # [batch_size, context_size]

        # mean cross-entropy over unmasked tokens only
        loss = torch.masked.mean(tokens_ce, mask=tokens_out_mask)
        # []

        tokens_ce = tokens_ce.detach().to(torch.float32)
        tokens_acc = tokens_acc.detach().to(torch.float32)
        return loss, tokens_ce, tokens_acc

    @abc.abstractmethod
    def backward(self, loss: torch.Tensor):
        """Backpropagate the loss and step the optimizer."""
        pass

    @abc.abstractmethod
    def set_learning_rate(self, learning_rate: float):
        """Apply the scheduled learning rate to the optimizer."""
        pass
