import abc
from dataclasses import dataclass
from itertools import count
from typing import Iterable

import numpy as np
import torch
from transformers import PreTrainedModel

from .batching import iter_rewritings_cache, make_rewritings_cache
from .rewriting import TokenizedRewriting


@dataclass(kw_only=True)
class FinetuneHparams:
    """Hyperparameters for `apply_finetune`."""

    # Adam learning rate (required: no sensible universal default).
    learning_rate: float
    # Weight of the rewriting cross-entropy term in the training loss.
    rewriting_loss_k: float = 1.0
    # preserving_loss_k: float = 1.0
    # Passed to Adam as `weight_decay`, i.e. L2 regularization strength.
    regularization_loss_k: float = 0.1
    # regularization_constraint_factor: float | None = None

    # Number of samples per optimization batch.
    batch_samples_num: int = 128
    # Context window size (in tokens) used when building the rewritings cache.
    context_tokens_num: int = 40

    # Stopping criteria; each can be disabled with None.  Training stops at
    # the end of the first epoch where any enabled criterion is satisfied.
    stopping_epochs_num: int | None = 20
    stopping_ce_threshold: float | None = 1e-2
    stopping_acc_threshold: float | None = 0.99


@dataclass(kw_only=True)
class FinetuneMetrics:
    processed_epochs_num: int
    rewriting_ce: np.ndarray
    rewriting_acc: np.ndarray


class FinetuneCallback(abc.ABC):
    """Observer interface notified of `apply_finetune` progress events."""

    @abc.abstractmethod
    def on_start(self, hparams: FinetuneHparams):
        """Invoked once before training, with the effective hyperparameters."""

    @abc.abstractmethod
    def on_epoch_start(self, epoch_i: int):
        """Invoked at the beginning of each epoch with its zero-based index."""

    @abc.abstractmethod
    def on_epoch_stop(self, metrics: FinetuneMetrics):
        """Invoked at the end of each epoch with the metrics gathered during it."""

    @abc.abstractmethod
    def on_stop(self, metrics: FinetuneMetrics | None):
        """Invoked once after training; `metrics` is the last epoch's report,
        or None when no epoch report was produced."""


def apply_finetune(*,
    model: PreTrainedModel,
    rewritings: Iterable[TokenizedRewriting],
    hparams: FinetuneHparams,
    callback: FinetuneCallback | None = None,
):
    """Finetune `model` in place to reproduce the target tokens of `rewritings`.

    Runs Adam over batches drawn from a precomputed rewritings cache until a
    configured stopping criterion (CE threshold, accuracy threshold, or epoch
    budget) is met.  When given, `callback` is notified at start/stop and at
    each epoch boundary with per-epoch metrics.

    Raises:
        ValueError: if `rewritings` yields no batches to train on.
    """
    rewritings_cache = make_rewritings_cache(rewritings,
        context_size=hparams.context_tokens_num,
        device=model.device)

    # weight_decay implements the L2 regularization term of the loss.
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=hparams.learning_rate,
        weight_decay=hparams.regularization_loss_k)

    if callback is not None:
        callback.on_start(hparams)

    metrics = None
    for epoch_i in count():
        if callback is not None:
            callback.on_epoch_start(epoch_i)

        # Accumulated as tensors to avoid a host/device sync per batch;
        # converted to Python scalars once per epoch below.
        epoch_num = 0       # count of masked-in target tokens seen this epoch
        epoch_ce_sum = 0
        epoch_acc_sum = 0
        for batch in iter_rewritings_cache(rewritings_cache, batch_size=hparams.batch_samples_num):
            (batch_tokens_in_wid, batch_tokens_in_mask, batch_tokens_out_wid, batch_tokens_out_mask) = batch

            batch_tokens_out_logits = model(
                input_ids=batch_tokens_in_wid,
                attention_mask=batch_tokens_in_mask
            ).logits
            # [batch_size, context_size, vocab_size]

            # cross_entropy expects the class axis directly after the batch axis.
            batch_tokens_out_ce = torch.nn.functional.cross_entropy(
                torch.swapaxes(batch_tokens_out_logits, -1, -2),
                batch_tokens_out_wid, reduction='none')
            # [batch_size, context_size]

            # Probability mass on the correct token ("soft accuracy").
            batch_tokens_out_acc = torch.gather(
                torch.softmax(batch_tokens_out_logits, dim=-1),
                dim=-1, index=batch_tokens_out_wid.unsqueeze(-1)).squeeze(-1)
            # [batch_size, context_size]

            ce = torch.masked.mean(batch_tokens_out_ce, mask=batch_tokens_out_mask)
            # []

            loss = ce * hparams.rewriting_loss_k

            epoch_num += torch.sum(batch_tokens_out_mask, dtype=torch.int64)
            epoch_ce_sum += torch.masked.sum(batch_tokens_out_ce.detach(), mask=batch_tokens_out_mask)
            epoch_acc_sum += torch.masked.sum(batch_tokens_out_acc.detach(), mask=batch_tokens_out_mask)

            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        if not isinstance(epoch_num, torch.Tensor):
            # No batches were produced: fail loudly with a clear message instead
            # of the AttributeError that `(0).cpu()` would otherwise raise.
            raise ValueError('rewritings produced no batches to finetune on')

        epoch_num = epoch_num.cpu().item()
        epoch_ce_sum = epoch_ce_sum.cpu().item()
        epoch_acc_sum = epoch_acc_sum.cpu().item()

        epoch_ce = epoch_ce_sum / epoch_num
        epoch_acc = epoch_acc_sum / epoch_num

        if callback is not None:
            metrics = FinetuneMetrics(
                rewriting_ce=epoch_ce,
                # BUGFIX: was epoch_acc_sum — the unnormalized sum, not the mean.
                rewriting_acc=epoch_acc,
                # BUGFIX: was epoch_num + 1 — the token count, not the epoch count.
                processed_epochs_num=epoch_i + 1)
            callback.on_epoch_stop(metrics)

        if hparams.stopping_ce_threshold is not None:
            if epoch_ce <= hparams.stopping_ce_threshold:
                break

        if hparams.stopping_acc_threshold is not None:
            if epoch_acc >= hparams.stopping_acc_threshold:
                break

        if hparams.stopping_epochs_num is not None:
            if epoch_i + 1 >= hparams.stopping_epochs_num:
                break

    if callback is not None:
        callback.on_stop(metrics)
