import dataclasses
from typing import Iterator

import numpy as np
import torch
from zkl_llmpt_iterator import VocabForNLP

from llmpt.neural import GPT
from .kv_cache import KVCache


@dataclasses.dataclass
class GPTInferenceHparams:
    """Hyperparameters governing a single GPT inference run."""

    # Attention context length; the KV cache is sized to context_size - 1.
    context_size: int
    # Upper bound on the number of tokens generated per call.
    generation_size: int
    # Softmax temperature applied to next-token logits when sampling.
    temperature: float = 1.0


class GPTInference:
    """Autoregressive text generation wrapper around a GPT model.

    Feeds a prefix through the network one token at a time to populate a
    KV cache, then samples continuation tokens until a special stop token
    is drawn or ``generation_size`` tokens have been emitted.
    """

    def __init__(self, *,
        # hparams
        hparams: GPTInferenceHparams,

        # model
        vocab: VocabForNLP,
        nn: GPT,

        # device
        device: torch.device | str | None = None,

        # compile
        compile: bool = False,
    ):
        self.hparams = hparams
        self.vocab = vocab
        self.nn = nn
        self.device = device
        self.compile = compile

        if device is not None:
            self.nn: GPT = self.nn.to(device=device)
        if compile:
            # torch.compile returns an OptimizedModule; the annotation keeps
            # type checkers treating the attribute as the original GPT.
            # noinspection PyTypeChecker
            self.nn: GPT = torch.compile(self.nn)

    @property
    def context_size(self) -> int:
        return self.hparams.context_size

    @property
    def generation_size(self) -> int:
        return self.hparams.generation_size

    @property
    def temperature(self) -> float:
        return self.hparams.temperature

    def generate(self, prefix: str) -> Iterator[str]:
        """Yield generated tokens (as strings) continuing ``prefix``.

        The prefix is tokenized per character, prepended with BEGIN, and
        streamed through the model to warm the KV cache; sampling then
        proceeds token by token. Generation stops early when a special
        token (BEGIN/END/PADDING/PREDICT) is sampled.

        Raises:
            ValueError: if ``temperature`` is not strictly positive
                (softmax over ``logits / temperature`` would be undefined).
        """
        if self.temperature <= 0:
            raise ValueError(f"temperature must be > 0, got {self.temperature}")

        prefix_tokens = [self.vocab.index_token(c) for c in prefix]
        prefix_tokens = [self.vocab.BEGIN_INDEX] + prefix_tokens
        prefix_tokens = torch.asarray(prefix_tokens, dtype=torch.int64, device=self.device)

        # NOTE(review): offset is float32 — presumably GPT.forward's
        # positional encoding expects a float position; confirm against
        # the model. Precision is exact for positions well below 2**24.
        offset = torch.asarray(0, dtype=torch.float32, device=self.device)
        kv_cache = KVCache(self.context_size - 1)

        # Warm the KV cache with every prefix token except the last; the
        # last one is fed in the sampling loop to get the first prediction.
        for token in prefix_tokens[:-1]:
            with torch.no_grad():
                tokens = torch.reshape(token, shape=(1, 1))
                _, layers_kv = self.nn.forward(tokens, offset, layers_extra_tokens_kv=kv_cache.read())
            kv_cache.append(layers_kv)
            offset += 1

        # Hoisted out of the sampling loop (was rebuilt every iteration);
        # a set gives O(1) membership tests.
        stop_tokens = {self.vocab.BEGIN_INDEX, self.vocab.END_INDEX,
                       self.vocab.PADDING_INDEX, self.vocab.PREDICT_INDEX}

        last_token = prefix_tokens[-1]
        for _ in range(self.generation_size):
            with torch.no_grad():
                tokens = torch.reshape(last_token, shape=(1, 1))
                next_token_logits, layers_kv = self.nn.forward(tokens, offset, layers_extra_tokens_kv=kv_cache.read())
                next_token_logits = next_token_logits / self.temperature
                next_token_probs = torch.softmax(next_token_logits, dim=-1)
                next_token_probs = torch.reshape(next_token_probs, [-1])

            next_token_probs = next_token_probs.detach().cpu().numpy()
            # np.random.choice returns np.int64; cast to a plain int so the
            # vocab lookup and set membership behave predictably.
            next_token = int(np.random.choice(len(self.vocab), p=next_token_probs))

            if next_token in stop_tokens:
                break
            yield self.vocab.get_token(next_token)

            last_token = torch.asarray(next_token, dtype=torch.int64, device=self.device)
            kv_cache.append(layers_kv)
            offset += 1
