import dataclasses
import json
import os

import numpy as np
import torch
import torch.distributed

from llmpt.neural import GPT
from llmpt.preprocess import SimpleVocabWithCount
from .vocab import VocabForNLP


class GPTModel:
    """A GPT network bundled with its hyperparameters and vocabulary.

    Provides construction of a fresh model (`create`) and disk round-tripping
    (`save`/`load`) of the three artifacts: ``hyperparams.json``,
    ``vocab.csv`` and ``weights.pt``.
    """

    @dataclasses.dataclass(kw_only=True)
    class Hyperparams:
        # Optional human-readable identifier; not consumed by the network.
        name: str | None = None

        vocab_size: int
        emb_size: int
        layers_n: int
        pos_size: int
        queries_n: int
        groups_n: int
        qk_size: int
        v_size: int
        m_size: int
        h_size: int

    @classmethod
    def construct_nn(cls,
        hyperparams: Hyperparams,
        device: torch.device | str | None = None
    ) -> GPT:
        """Instantiate a GPT network matching *hyperparams* on *device*."""
        return GPT(
            vocab_size=hyperparams.vocab_size,
            emb_size=hyperparams.emb_size,
            layers_n=hyperparams.layers_n,
            pos_size=hyperparams.pos_size,
            queries_n=hyperparams.queries_n,
            groups_n=hyperparams.groups_n,
            qk_size=hyperparams.qk_size,
            v_size=hyperparams.v_size,
            m_size=hyperparams.m_size,
            h_size=hyperparams.h_size,
            device=device)

    @classmethod
    def create(cls, *,
        hyperparams: Hyperparams,
        vocab: SimpleVocabWithCount,
        device: torch.device | None = None,
    ) -> 'GPTModel':
        """Create a fresh (untrained) model from *hyperparams* and a raw vocab.

        The raw vocab is truncated so that, together with the special tokens
        added by ``VocabForNLP``, it has exactly ``hyperparams.vocab_size``
        entries.
        """
        # vocab: reserve room for the special tokens added by VocabForNLP
        vocab = vocab.truncate(hyperparams.vocab_size - len(VocabForNLP.specials))
        vocab = VocabForNLP(vocab)

        # nn: one dummy-token forward pass right after construction
        # (presumably materializes lazy state inside GPT — TODO confirm)
        nn = cls.construct_nn(hyperparams, device)
        nn(torch.zeros([1, 1], dtype=torch.int64, device=device))

        return cls(
            hyperparams=hyperparams,
            vocab=vocab,
            nn=nn)

    @classmethod
    def load(cls,
        path: str, *,
        device: torch.device | None = None,
    ) -> 'GPTModel':
        """Load a model previously written by `save` from directory *path*.

        Raises:
            ValueError: if the vocab stored in ``vocab.csv`` disagrees in
                size with the stored hyperparameters.
        """
        # hyperparams
        with open(os.path.join(path, 'hyperparams.json'), 'rt', encoding='utf-8') as fp:
            hyperparams = cls.Hyperparams(**json.load(fp))

        # vocab
        vocab = SimpleVocabWithCount.load_from_file(os.path.join(path, 'vocab.csv'))
        vocab = VocabForNLP(vocab)
        # Explicit raise instead of `assert`: this consistency check validates
        # external input and must survive `python -O` (asserts are stripped).
        if len(vocab) != hyperparams.vocab_size:
            raise ValueError(
                f'vocab size mismatch in {path!r}: vocab.csv yields '
                f'{len(vocab)} entries, hyperparams.json declares '
                f'{hyperparams.vocab_size}')

        # nn
        nn = cls.construct_nn(hyperparams, device)
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources (or pass weights_only=True once the
        # minimum supported torch version provides it).
        state = torch.load(
            os.path.join(path, 'weights.pt'),
            map_location=device or torch.get_default_device())
        nn.load_state_dict(state)

        return cls(
            hyperparams=hyperparams,
            vocab=vocab,
            nn=nn)

    def __init__(self, *,
        hyperparams: Hyperparams,
        vocab: VocabForNLP,
        nn: GPT,
    ):
        """Direct constructor; prefer `create` or `load` for normal use."""
        self.hyperparams = hyperparams
        self.vocab = vocab
        self.nn = nn

    @property
    def device(self) -> torch.device:
        """Device of the network's (first) parameter."""
        return next(self.nn.parameters()).device

    @property
    def parameters_n(self) -> int:
        """Total number of scalar parameters in the network."""
        # Tensor.numel() gives the element count directly; the previous
        # np.prod(shape) returned a numpy float for 0-d tensors and needed
        # an int() patch-up.
        return sum(p.numel() for p in self.nn.parameters())

    def save(self, path: str):
        """Write ``hyperparams.json``, ``vocab.csv`` and ``weights.pt`` into *path*.

        Creates *path* if needed; existing files are overwritten.
        """
        os.makedirs(path, exist_ok=True)

        # hyperparameters — indented, non-ASCII-friendly; parsed content is
        # unchanged, so `load` reads it exactly as before
        with open(os.path.join(path, 'hyperparams.json'), 'wt', encoding='utf-8') as fp:
            json.dump(dataclasses.asdict(self.hyperparams), fp, ensure_ascii=False, indent=2)

        # vocab
        self.vocab.base.save_to_file(os.path.join(path, 'vocab.csv'))

        # nn
        torch.save(self.nn.state_dict(), os.path.join(path, 'weights.pt'))
