import os
from dataclasses import dataclass

import numpy as np
import torch
import torch.distributed
from zkl_llmpt_datasets import SimpleVocabWithCount
from zkl_llmpt_iterator import VocabForNLP
from zkl_serialization import dump_and_save_json, load_and_parse_json

from llmpt.neural import GPT


@dataclass(kw_only=True)
class GPTModelHparams:
    """Hyperparameters defining a GPT network's architecture.

    All fields are keyword-only (``kw_only=True``), which is also what makes
    it legal for the defaulted ``name`` field to precede the required ones.
    """

    # Optional human-readable identifier for this configuration.
    name: str | None = None

    vocab_size: int  # token vocabulary size (includes special tokens)
    emb_size: int    # token embedding dimension
    layers_n: int    # number of transformer layers
    pos_size: int    # positional encoding size / maximum context length
    queries_n: int   # number of query heads per attention layer
    groups_n: int    # number of key/value head groups (grouped-query attention)
    qk_size: int     # per-head query/key dimension
    v_size: int      # per-head value dimension
    m_size: int      # intermediate (MLP) dimension
    h_size: int      # hidden dimension


class GPTModel:
    """A GPT network bundled with its hyperparameters and vocabulary.

    Provides alternate constructors (``create``, ``load``), persistence
    (``save``), and convenience properties over the underlying ``GPT`` nn.
    """

    @classmethod
    def construct_nn(cls,
        hparams: GPTModelHparams,
        device: torch.device | str | None = None
    ) -> GPT:
        """Build a bare ``GPT`` network from *hparams* on *device*.

        The network is uninitialized with respect to any saved weights;
        callers load or warm up the parameters themselves.
        """
        return GPT(
            vocab_size=hparams.vocab_size,
            emb_size=hparams.emb_size,
            layers_n=hparams.layers_n,
            pos_size=hparams.pos_size,
            queries_n=hparams.queries_n,
            groups_n=hparams.groups_n,
            qk_size=hparams.qk_size,
            v_size=hparams.v_size,
            m_size=hparams.m_size,
            h_size=hparams.h_size,
            device=device)

    @classmethod
    def create(cls, *,
        hparams: GPTModelHparams,
        vocab: SimpleVocabWithCount,
        device: torch.device | None = None,
    ) -> 'GPTModel':
        """Create a fresh model from *hparams* and a raw counted vocab.

        The vocab is truncated so that, together with the special tokens
        added by ``VocabForNLP``, it fits within ``hparams.vocab_size``.
        """
        # vocab: reserve room for the NLP special tokens, then wrap
        vocab = vocab.truncate(hparams.vocab_size - len(VocabForNLP.specials))
        vocab = VocabForNLP(vocab)

        # nn: run one dummy forward pass with a single token —
        # presumably to materialize any lazily-initialized state; TODO confirm
        nn = cls.construct_nn(hparams, device)
        nn(torch.zeros([1, 1], dtype=torch.int64, device=device))

        return cls(
            hparams=hparams,
            vocab=vocab,
            nn=nn)

    @classmethod
    def load(cls,
        path: str, *,
        device: torch.device | None = None,
    ) -> 'GPTModel':
        """Load a model previously written by ``save`` from directory *path*.

        Expects ``hparams.json``, ``vocab.csv`` and ``weights.pt`` inside
        *path*. Raises ``AssertionError`` if the stored vocab size does not
        match the stored hyperparameters.
        """
        # hparams — NOTE(review): load_and_parse_json must return an object
        # with GPTModelHparams attributes (it is attribute-accessed below);
        # verify against zkl_serialization's API.
        hparams = load_and_parse_json(os.path.join(path, 'hparams.json'))

        # vocab (specials are re-added by the VocabForNLP wrapper)
        vocab = SimpleVocabWithCount.load_from_file(os.path.join(path, 'vocab.csv'))
        vocab = VocabForNLP(vocab)
        assert len(vocab) == hparams.vocab_size

        # nn: rebuild architecture, then restore weights onto the target device
        nn = cls.construct_nn(hparams, device)
        state = torch.load(
            os.path.join(path, 'weights.pt'),
            map_location=device or torch.get_default_device())
        nn.load_state_dict(state)

        return cls(
            hparams=hparams,
            vocab=vocab,
            nn=nn)

    def __init__(self, *,
        hparams: GPTModelHparams,
        vocab: VocabForNLP,
        nn: GPT,
    ):
        self.hparams = hparams
        self.vocab = vocab
        self.nn = nn

    @property
    def device(self) -> torch.device:
        """Device of the network's parameters (taken from the first one)."""
        return next(self.nn.parameters()).device

    @property
    def parameters_n(self) -> int:
        """Total number of scalar parameters in the network."""
        # Tensor.numel() counts elements exactly in integer arithmetic —
        # no need for numpy's np.prod over the shape tuple.
        return sum(int(p.numel()) for p in self.nn.parameters())

    def save(self, path: str):
        """Write hparams, vocab and weights into directory *path*.

        Creates *path* if needed; writes ``hparams.json``, ``vocab.csv``
        and ``weights.pt`` — the exact layout ``load`` expects.
        """
        os.makedirs(path, exist_ok=True)

        # hyperparameters
        dump_and_save_json(self.hparams, os.path.join(path, 'hparams.json'))

        # vocab (the base vocab only; specials are reconstructed on load)
        self.vocab.base.save_to_file(os.path.join(path, 'vocab.csv'))

        # nn
        torch.save(self.nn.state_dict(), os.path.join(path, 'weights.pt'))
