import abc
import os
import shutil
import sys
from collections.abc import Callable
from dataclasses import dataclass
from functools import cached_property
from typing import Iterator

import numpy as np
import torch
from zkl_llmpt_datasets import SimpleVocab, TokenizedDataset
from zkl_llmpt_iterator import GPTTrainingIterator, VocabForNLP
from zkl_serialization import dump_and_save_json, load_and_parse_json
from zkl_training import ProcessingTask

from llmpt.neural import GPT
from .model import GPTModelHparams
from .plugin_checkpoint import CheckpointPlugin
from .plugin_metrics import MetricsPlugin
from .plugin_summary import SummaryPlugin
from .plugin_tqdm import TqdmPlugin
from .progress import ProgressRecorder
from .random_offset import apply_random_offset
from .validating import GPTValidating


@dataclass(kw_only=True)
class GPTTrainingHparams:
    """Hyperparameters controlling a single GPT training run."""

    # optional human-readable name for the run
    name: str | None = None

    # batch geometry: samples per batch, tokens per sample chunk
    batch_samples_n: int
    chunk_tokens_n: int

    # learning-rate schedule: linear warmup over warmup_tokens_n tokens, then
    # exponential decay toward a 10% floor, halving every decay_tokens_n tokens
    # (see GPTTraining._before_step)
    learning_rate: float
    warmup_tokens_n: int = int(100e6)
    decay_tokens_n: int = int(100e6)

    # token budgets: training stops once progress reaches train_tokens_n
    # (None = unlimited); valid/summary budgets are consumed by the
    # validation and summary plugins — confirm exact semantics there
    train_tokens_n: int | None = None
    valid_tokens_n: int | None = int(1e6)
    summary_tokens_n: int | None = int(20e6)

    # dropout rates for feed-forward / attention, passed into the forward
    # kernel (None presumably means the model's default — confirm in GPT)
    ff_dropout: float | None = None
    at_dropout: float | None = None


@dataclass(kw_only=True)
class GPTTrainingInput:
    """One training batch, shaped for next-token (teacher-forcing) prediction.

    Built by process_data_batch: inputs are the tile tokens minus the last
    one, targets the tile tokens minus the first one.
    """

    tokens_in_wid: torch.Tensor    # input token word-ids
    tokens_in_pos: torch.Tensor    # absolute positions (tile offset + index)
    tokens_out_wid: torch.Tensor   # target word-ids (inputs shifted by one)
    tokens_out_mask: torch.Tensor  # True where target word-id >= 1 (non-padding)


@dataclass(kw_only=True)
class GPTTrainingOutput:
    """Per-token training metrics and the scalar loss, built by forward_kernel."""

    tokens_out_ce: torch.Tensor    # per-token cross-entropy, unreduced
    tokens_out_acc: torch.Tensor   # per-token probability assigned to the correct target
    tokens_out_mask: torch.Tensor  # target mask, passed through from the input
    loss: torch.Tensor             # masked mean of tokens_out_ce (scalar)


@dataclass(kw_only=True)
class GPTTrainingCreateArgs:
    """Arguments for starting a new training run from scratch."""

    # path: run directory for hparams/vocab/checkpoints; None disables persistence
    training_dir_path: str | None = None

    # hparams: saved to the run directory on creation
    model_hparams: GPTModelHparams
    training_hparams: GPTTrainingHparams

    # dataset: factories are invoked once during GPTTraining.__init__
    train_dataset_factory: Callable[[], TokenizedDataset]
    valid_dataset_factory: Callable[[], TokenizedDataset] | None = None

    # device: target torch device; compile toggles torch.compile of the forward kernel
    device: torch.device | str | None = None
    compile: bool = False


@dataclass(kw_only=True)
class GPTTrainingResumeArgs:
    """Arguments for resuming a training run from a saved checkpoint."""

    # path: existing run directory; checkpoint_tokens_n selects the checkpoint
    # to resume from (None = latest available)
    training_dir_path: str
    checkpoint_tokens_n: int | None = None

    # dataset: factories are invoked once during GPTTraining.__init__
    train_dataset_factory: Callable[[], TokenizedDataset]
    valid_dataset_factory: Callable[[], TokenizedDataset] | None = None

    # device: target torch device; compile toggles torch.compile of the forward kernel
    device: torch.device | str | None = None
    compile: bool = False


GPTTrainingInitArgs = GPTTrainingCreateArgs | GPTTrainingResumeArgs


class GPTTraining(ProcessingTask[GPTTrainingInput, GPTTrainingOutput], abc.ABC):
    """Abstract GPT training task.

    Wires together the dataset, vocab, batch iterator, model, validation,
    progress tracking and checkpointing for one training run.  Subclasses
    supply the model itself (`_init_model`, `_save_model`, `gpt`) and the
    optimization step (`backward`, `set_learning_rate`).
    """

    # path
    training_dir_path: str | None           # run dir; None disables all persistence
    resume_checkpoint_tokens_n: int | None  # token count of the resumed checkpoint

    # hparams
    model_hparams: GPTModelHparams
    training_hparams: GPTTrainingHparams

    # dataset
    train_dataset: TokenizedDataset
    valid_dataset: TokenizedDataset | None

    # vocab (set by _init_vocab; annotation added for completeness)
    vocab: VocabForNLP

    # device
    device: torch.device | str | None
    compile: bool

    # iterator
    iterator: GPTTrainingIterator

    # validating
    validating_factory: Callable[[], GPTValidating] | None

    # progress
    progress_recorder: ProgressRecorder

    def __init__(self, args: GPTTrainingInitArgs):
        super().__init__()

        # modules — order matters: the vocab needs hparams + datasets, the
        # iterator needs the vocab + device, progress needs the selected checkpoint
        self._init_path(args)
        self._init_hparams(args)
        self._init_datasets(args)
        self._init_vocab(args)
        self._init_device(args)
        self._init_iterator(args)
        self._init_model(args)
        self._init_validating(args)
        self._init_progress(args)

        # plugins
        self.install(TqdmPlugin())
        self.install(MetricsPlugin())
        self.install(SummaryPlugin())
        self.install(CheckpointPlugin())

    # path

    def _init_path(self, args: GPTTrainingInitArgs):
        """Record the run directory and, on resume, select the checkpoint to load."""
        self.training_dir_path = args.training_dir_path

        if isinstance(args, GPTTrainingCreateArgs):
            self.resume_checkpoint_tokens_n = None
        if isinstance(args, GPTTrainingResumeArgs):
            self.resume_checkpoint_tokens_n = self.select_checkpoint(
                args.training_dir_path, args.checkpoint_tokens_n)

    # hparams

    @property
    def model_hparams_file_path(self):
        return os.path.join(self.training_dir_path, 'model_hparams.json') \
            if self.training_dir_path is not None else None

    @property
    def training_hparams_file_path(self):
        return os.path.join(self.training_dir_path, 'training_hparams.json') \
            if self.training_dir_path is not None else None

    def _init_hparams(self, args: GPTTrainingInitArgs):
        """Take hparams from args on create (and persist them); reload on resume."""
        if isinstance(args, GPTTrainingCreateArgs):
            self.model_hparams = args.model_hparams
            self.training_hparams = args.training_hparams
            self._save_hparams()
        if isinstance(args, GPTTrainingResumeArgs):
            self._load_hparams()

    def _load_hparams(self):
        if self.training_dir_path is None:
            raise ValueError("Failed to load hparams! Argument training_dir_path is not set!")
        self.model_hparams = load_and_parse_json(self.model_hparams_file_path, GPTModelHparams)
        self.training_hparams = load_and_parse_json(self.training_hparams_file_path, GPTTrainingHparams)

    def _save_hparams(self):
        # best-effort persistence: silently skipped when the run is in-memory only
        if self.training_dir_path is None:
            return
        os.makedirs(self.training_dir_path, exist_ok=True)
        dump_and_save_json(self.model_hparams, self.model_hparams_file_path)
        dump_and_save_json(self.training_hparams, self.training_hparams_file_path)

    # dataset

    def _init_datasets(self, args: GPTTrainingInitArgs):
        """Materialize the datasets from their factories (validation is optional)."""
        self.train_dataset = args.train_dataset_factory()
        self.valid_dataset = args.valid_dataset_factory() \
            if args.valid_dataset_factory is not None else None

    # vocab

    @property
    def vocab_file_path(self):
        return os.path.join(self.training_dir_path, 'vocab.csv') \
            if self.training_dir_path is not None else None

    def _init_vocab(self, args: GPTTrainingInitArgs):
        """Build the vocab from the train dataset on create; reload it on resume."""
        if isinstance(args, GPTTrainingCreateArgs):
            self._make_vocab()
            self._save_vocab()
        if isinstance(args, GPTTrainingResumeArgs):
            self._load_vocab()

    def _make_vocab(self):
        vocab = self.train_dataset.vocab
        # truncate so the NLP special tokens still fit within the model's vocab size
        vocab = vocab.truncate(self.model_hparams.vocab_size - len(VocabForNLP.specials))
        self.vocab = VocabForNLP(vocab)

    def _load_vocab(self):
        vocab = SimpleVocab.load_from_file(self.vocab_file_path)
        self.vocab = VocabForNLP(vocab)

    def _save_vocab(self):
        if self.training_dir_path is None:
            return
        os.makedirs(self.training_dir_path, exist_ok=True)
        self.vocab.base.save_to_file(self.vocab_file_path)

    # device

    def _init_device(self, args: GPTTrainingInitArgs):
        self.device = args.device
        self.compile = args.compile

    # iterator

    def _init_iterator(self, args: GPTTrainingInitArgs):
        """Create a fresh batch iterator, or resume one from the checkpoint state."""
        if isinstance(args, GPTTrainingCreateArgs):
            self.iterator = GPTTrainingIterator.create(
                dataset_factory=lambda: self.train_dataset,
                vocab_factory=lambda: self.vocab,
                batch_samples_n=self.training_hparams.batch_samples_n,
                chunk_tokens_n=self.training_hparams.chunk_tokens_n,
                device=self.device)
        if isinstance(args, GPTTrainingResumeArgs):
            iterator_file_path = os.path.join(self.resume_checkpoint_dir_path, "iterator.json")
            self.iterator = GPTTrainingIterator.resume(
                state_file_path=iterator_file_path,
                dataset_factory=lambda: self.train_dataset,
                vocab_factory=lambda: self.vocab,
                batch_samples_n=self.training_hparams.batch_samples_n,
                chunk_tokens_n=self.training_hparams.chunk_tokens_n,
                device=self.device)

    def _save_iterator(self):
        if self.training_dir_path is None:
            return
        os.makedirs(self.saving_checkpoint_dir_path, exist_ok=True)
        iterator_file_path = os.path.join(self.saving_checkpoint_dir_path, "iterator.json")
        self.iterator.pause(iterator_file_path)

    # model

    @abc.abstractmethod
    def _init_model(self, args: GPTTrainingInitArgs):
        """Build (or load, on resume) the model; implemented by subclasses."""

    @abc.abstractmethod
    def _save_model(self):
        """Persist the model into the current checkpoint dir; implemented by subclasses."""

    # validating

    def _init_validating(self, args: GPTTrainingInitArgs):
        # BUGFIX: the original conditional bound inside the lambda body
        # (`lambda: X if cond else None`), so validating_factory was ALWAYS a
        # callable — sometimes one returning None — contradicting its
        # `Callable | None` annotation.  The conditional must select the
        # assignment itself, mirroring _init_datasets.
        if args.valid_dataset_factory is not None:
            self.validating_factory = lambda: GPTValidating.create_from_training(
                training=self,
                dataset_factory=lambda: self.valid_dataset,
                vocab_factory=lambda: self.vocab)
        else:
            self.validating_factory = None

    # progress

    @property
    def progress_tokens_n(self) -> int:
        """Total number of target tokens trained on so far."""
        return self.progress_recorder.get()

    def _init_progress(self, args: GPTTrainingInitArgs):
        if isinstance(args, GPTTrainingCreateArgs):
            self.progress_recorder = ProgressRecorder(0)
        if isinstance(args, GPTTrainingResumeArgs):
            self.progress_recorder = ProgressRecorder(self.resume_checkpoint_tokens_n)

    # checkpoint

    @property
    def checkpoints_dir_path(self):
        return os.path.join(self.training_dir_path, 'checkpoints') \
            if self.training_dir_path is not None else None

    def checkpoint_dir_path(self, checkpoint_tokens_n: int):
        """Dir of the checkpoint taken at `checkpoint_tokens_n` trained tokens."""
        return os.path.join(self.checkpoints_dir_path, str(checkpoint_tokens_n)) \
            if self.training_dir_path is not None else None

    @property
    def resume_checkpoint_dir_path(self):
        return self.checkpoint_dir_path(self.resume_checkpoint_tokens_n) \
            if self.training_dir_path is not None and self.resume_checkpoint_tokens_n is not None else None

    @property
    def saving_checkpoint_dir_path(self):
        return self.checkpoint_dir_path(self.progress_tokens_n)

    def save_checkpoint(self):
        """Write a checkpoint at the current progress, then prune stale ones."""
        self._save_checkpoint()
        self._clean_checkpoints()

    def _save_checkpoint(self):
        self._save_iterator()
        self._save_model()

    def _clean_checkpoints(self):
        """Remove stale checkpoints, keeping the 3 newest plus one per 500M tokens."""
        # BUGFIX: without this guard an in-memory run (training_dir_path=None)
        # would call os.listdir(None) — which scans the current working
        # directory — and then shutil.rmtree(None).
        if self.training_dir_path is None:
            return

        def keep_by_interval_rule(interval: int | float):
            # keep the first checkpoint at or past each successive multiple of `interval`
            def rule(checkpoints_tokens_n: list[int]):
                sorted_indices = np.argsort(checkpoints_tokens_n)
                step = interval
                keep_indices = []
                for index in sorted_indices:
                    checkpoint_step = checkpoints_tokens_n[index]
                    if checkpoint_step >= step:
                        keep_indices.append(index)
                        # advance past every multiple this checkpoint already covers
                        while checkpoint_step >= step:
                            step += interval

                checkpoints_keep = np.zeros(len(checkpoints_tokens_n), dtype=bool)
                checkpoints_keep[keep_indices] = True
                return checkpoints_keep

            return rule

        def keep_last_n_rule(n: int):
            # keep the n checkpoints with the highest token counts
            def rule(checkpoints_tokens_n: list[int]):
                sorted_indices = np.argsort(checkpoints_tokens_n)
                keep_indices = sorted_indices[-n:]
                checkpoints_keep = np.zeros(len(checkpoints_tokens_n), dtype=bool)
                checkpoints_keep[keep_indices] = True
                return checkpoints_keep

            return rule

        rules = [keep_last_n_rule(3), keep_by_interval_rule(500e6)]

        # a checkpoint survives if ANY rule wants to keep it
        checkpoints_progress_n = list(self._iter_checkpoints(self.checkpoints_dir_path))
        rules_checkpoints_keep = [rule(checkpoints_progress_n) for rule in rules]
        checkpoints_keep = np.any(rules_checkpoints_keep, axis=0)
        for progress_tokens_n, keep in zip(checkpoints_progress_n, checkpoints_keep):
            if not keep:
                checkpoint_path = self.checkpoint_dir_path(progress_tokens_n)
                shutil.rmtree(checkpoint_path, ignore_errors=True)
                print(f"Removed {checkpoint_path}", file=sys.stderr)

    @classmethod
    def iter_checkpoints(cls, training_dir_path: str) -> Iterator[int]:
        """Yield the token counts of all checkpoints under `training_dir_path`."""
        checkpoints_dir_path = os.path.join(training_dir_path, 'checkpoints')
        return cls._iter_checkpoints(checkpoints_dir_path)

    @classmethod
    def select_checkpoint(cls,
        training_dir_path: str,
        checkpoint_tokens_n: int | None = None,
    ) -> int:
        """Return the requested checkpoint's token count, or the latest available.

        Raises ValueError when no checkpoint exists and none was requested.
        """
        if checkpoint_tokens_n is not None:
            return checkpoint_tokens_n

        latest_checkpoint_tokens_n = max(cls.iter_checkpoints(training_dir_path), default=None)
        if latest_checkpoint_tokens_n is None:
            raise ValueError(f"No checkpoints found in {training_dir_path}")
        return latest_checkpoint_tokens_n

    @classmethod
    def _iter_checkpoints(cls, checkpoints_dir_path: str) -> Iterator[int]:
        # ROBUSTNESS: a missing directory simply means "no checkpoints yet";
        # report none instead of letting os.listdir raise FileNotFoundError,
        # so select_checkpoint can raise its clearer ValueError.
        if not os.path.isdir(checkpoints_dir_path):
            return
        for checkpoint_name in os.listdir(checkpoints_dir_path):
            # checkpoint dirs are named by their token count
            if checkpoint_name.isdigit():
                yield int(checkpoint_name)

    # task

    def _before_step(self):
        """Per-step hook: early stop by token budget, then learning-rate schedule."""
        # early stop by progress
        if self.training_hparams.train_tokens_n is not None:
            if self.progress_tokens_n >= self.training_hparams.train_tokens_n:
                raise StopIteration

        # dynamic learning rate: linear warmup to 1.0, then exponential decay
        # (halving every decay_tokens_n tokens) toward a 10% floor
        if self.progress_tokens_n <= self.training_hparams.warmup_tokens_n:
            k = min(1.0, self.progress_tokens_n / self.training_hparams.warmup_tokens_n)
        else:
            k = (self.progress_tokens_n - self.training_hparams.warmup_tokens_n) / self.training_hparams.decay_tokens_n
            k = np.exp2(-k)
            k = 0.1 + 0.9 * k
        self.set_learning_rate(self.training_hparams.learning_rate * k)

        super()._before_step()

    def _next(self) -> GPTTrainingInput:
        return process_data_batch(next(self.iterator))

    def _process(self, input: GPTTrainingInput) -> GPTTrainingOutput:
        output = self.forward(input)
        self.backward(output.loss)
        return output

    def _after_process(self, output: GPTTrainingOutput):
        # NOTE(review): assumes ProgressRecorder.increase sums the boolean
        # target mask, i.e. progress counts non-padding tokens — confirm
        self.progress_recorder.increase(output.tokens_out_mask)
        super()._after_process(output)

    # neural

    @property
    @abc.abstractmethod
    def gpt(self) -> GPT:
        """The underlying GPT module; implemented by subclasses."""

    @cached_property
    def forward_kernel(self):
        # compiled (or plain) kernel is cached so torch.compile runs only once
        return torch.compile(forward_kernel) if self.compile else forward_kernel

    def forward(self, input: GPTTrainingInput) -> GPTTrainingOutput:
        return self.forward_kernel(self.gpt, input,
            position_period=2 ** (self.model_hparams.pos_size // 2),
            at_dropout=self.training_hparams.at_dropout,
            ff_dropout=self.training_hparams.ff_dropout)

    @abc.abstractmethod
    def backward(self, loss: torch.Tensor):
        """Backpropagate `loss` and step the optimizer; implemented by subclasses."""

    @abc.abstractmethod
    def set_learning_rate(self, learning_rate: float):
        """Set the optimizer's learning rate; implemented by subclasses."""


def process_data_batch(
    batch: tuple[torch.Tensor, torch.Tensor, torch.Tensor]
) -> GPTTrainingInput:
    """Turn a raw iterator batch into a teacher-forcing training input.

    `batch` is (tile_tokens, tile_offset, doc_index); the document index is
    not needed here.  Inputs are the tile tokens minus the last position and
    targets the tile tokens minus the first, so each input position predicts
    its successor.  Word-id 0 is treated as padding and masked out of the
    targets.
    """
    tile_tokens, tile_offset, _doc_index = batch  # doc_index unused
    tokens_in_wid = tile_tokens[:, :-1]
    # absolute positions: the tile's offset plus the index within the tile
    tokens_in_pos = torch.arange(0, tokens_in_wid.shape[-1], dtype=tile_offset.dtype, device=tile_offset.device)
    tokens_in_pos = tokens_in_pos + torch.unsqueeze(tile_offset, dim=-1)
    tokens_out_wid = tile_tokens[:, 1:]
    tokens_out_mask = torch.ge(tokens_out_wid, 1)
    return GPTTrainingInput(
        tokens_in_wid=tokens_in_wid,
        tokens_in_pos=tokens_in_pos,
        tokens_out_wid=tokens_out_wid,
        tokens_out_mask=tokens_out_mask)


def forward_kernel(
    gpt: GPT, input: GPTTrainingInput, *,
    position_period: torch.Tensor | float = 0.0,
    at_dropout: torch.Tensor | float = 0.0,
    ff_dropout: torch.Tensor | float = 0.0,
) -> GPTTrainingOutput:
    """Run one training forward pass and compute per-token metrics.

    Positions are cast to float64 and randomly offset (modulo
    `position_period`) before being fed to the model.  Returns the unreduced
    per-token cross-entropy, the probability each target received, the
    pass-through target mask, and the mask-weighted mean loss.
    """
    targets = input.tokens_out_wid
    mask = input.tokens_out_mask

    # positions: float64, then a shared random shift within the period
    # (RNG order matters: the offset is drawn before the model's dropout)
    positions = input.tokens_in_pos.to(dtype=torch.float64)
    positions = apply_random_offset(positions, period=position_period)

    logits, _ = gpt.forward(
        input.tokens_in_wid, positions,
        at_dropout=at_dropout,
        ff_dropout=ff_dropout)  # [batch_size, chunk_size, vocab_size]

    # cross_entropy wants the class dim second, hence the swap
    ce = torch.nn.functional.cross_entropy(
        torch.swapaxes(logits, -1, -2),
        targets, reduction='none')  # [batch_size, chunk_size]

    # probability assigned to each correct target token
    probs = torch.softmax(logits, dim=-1)
    acc = torch.gather(probs, dim=-1, index=targets.unsqueeze(-1)).squeeze(-1)
    # [batch_size, chunk_size]

    return GPTTrainingOutput(
        tokens_out_ce=ce,
        tokens_out_acc=acc,
        tokens_out_mask=mask,
        loss=torch.masked.mean(ce, mask=mask))  # scalar
