from dataclasses import dataclass

import torch
from zkl_ptutils_neural import RMSNorm

from zkl_llmpt_nbptt.nbptt_transformer import NbpttTransformer, NbpttTransformerHparams

@dataclass(kw_only=True)
class NbpttCausalLanguageModelHparams:
    """Hyperparameters for NbpttCausalLanguageModel."""
    # Size of the embedding table; also the width of both output logit heads
    # (presumably the vocabulary size — confirm against the tokenizer).
    embs_n: int
    # Dimensionality of each token embedding vector.
    emb_size: int
    # Hyperparameters forwarded verbatim to the inner NbpttTransformer.
    transformer: NbpttTransformerHparams

class NbpttCausalLanguageModel(torch.nn.Module):
    """Causal language model: embedding -> NBPTT transformer -> two logit heads.

    Produces two independent logit projections ("out" and "res") over the
    embedding table size, each preceded by its own RMSNorm.
    """

    def __init__(self,
        hparams: NbpttCausalLanguageModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """Build all submodules on the given dtype/device.

        Args:
            hparams: model configuration (see NbpttCausalLanguageModelHparams).
            dtype: parameter dtype passed through to every submodule.
            device: parameter device passed through to every submodule.
        """
        super().__init__()
        self.hparams = hparams

        # Both heads project from the transformer output width back to the
        # embedding-table size.
        head_in = hparams.transformer.o_size
        head_out = hparams.embs_n

        self.embedding = torch.nn.Embedding(
            num_embeddings=head_out,
            embedding_dim=hparams.emb_size,
            dtype=dtype, device=device)
        self.transformer = NbpttTransformer(
            hparams=hparams.transformer,
            dtype=dtype, device=device)
        # NOTE(review): RMSNorm() is constructed without an explicit size —
        # assumes the project RMSNorm infers/needs none; confirm its API.
        self.out_norm = RMSNorm()
        self.out_linear = torch.nn.Linear(
            head_in, head_out, bias=False,
            dtype=dtype, device=device)
        self.res_norm = RMSNorm()
        self.res_linear = torch.nn.Linear(
            head_in, head_out, bias=False,
            dtype=dtype, device=device)

    def forward(self, wid: torch.Tensor):
        """Map token ids to a pair of logit tensors.

        Args:
            wid: integer tensor of token ids (shape passed straight to
                nn.Embedding, then the transformer — layout assumed
                compatible with both; confirm against callers).

        Returns:
            Tuple ``(logits_out, logits_res)``, each of width ``embs_n``.
        """
        hidden = self.transformer(self.embedding(wid))
        return (
            self.out_linear(self.out_norm(hidden)),
            self.res_linear(self.res_norm(hidden)),
        )
