from dataclasses import dataclass

import torch

from .transformer import Llama3SynergySubTransformer


@dataclass
class ChunkLocalTransformerOutput:
    """Output of a chunked local-transformer pass.

    Carries only the final token embeddings; intermediate per-layer KV
    tensors produced by the wrapped transformer are not propagated here.
    """
    # Token embeddings; when chunking is used this is the concatenation of
    # per-chunk outputs along the sequence (-2) axis.
    tokens_emb: torch.Tensor


class ChunkLocalTransformer(torch.nn.Module):
    """Wrapper around a transformer that can optionally process the token
    sequence in fixed-size local chunks (see ``chunk_local_transform``).

    With ``local_tokens_n=None`` the wrapped transformer is invoked directly
    on the full sequence; otherwise the sequence is processed chunk by chunk.
    """

    def __init__(self, transformer: Llama3SynergySubTransformer):
        super().__init__()
        # The wrapped transformer; registered as a submodule via nn.Module.
        self.transformer = transformer

    def forward(self, *,
        tokens_emb: torch.Tensor,
        tokens_pos_emb: torch.Tensor | None = None,
        tokens_mask: torch.Tensor | None = None,
        local_tokens_n: int | None = None,
        **kwargs,
    ) -> ChunkLocalTransformerOutput:
        """Run the transformer, chunked if ``local_tokens_n`` is given.

        Extra ``**kwargs`` are forwarded to the wrapped transformer verbatim.
        """
        common = dict(
            tokens_emb=tokens_emb,
            tokens_pos_emb=tokens_pos_emb,
            tokens_mask=tokens_mask,
            **kwargs,
        )
        if local_tokens_n is None:
            # NOTE(review): this branch returns the wrapped transformer's own
            # output object, not ChunkLocalTransformerOutput as annotated —
            # confirm callers accept both shapes before unifying.
            return self.transformer(**common)
        return chunk_local_transform(
            transformer=self.transformer,
            local_tokens_n=local_tokens_n,
            **common)


def chunk_local_transform(*,
    transformer: Llama3SynergySubTransformer,
    tokens_emb: torch.Tensor,
    tokens_pos_emb: torch.Tensor | None = None,
    tokens_mask: torch.Tensor | None = None,
    local_tokens_n: int,
    **kwargs,
) -> ChunkLocalTransformerOutput:
    """Apply ``transformer`` to ``tokens_emb`` in chunks of up to
    ``local_tokens_n`` tokens along the sequence (-2) axis.

    Each chunk call receives the previous chunk's ``layers_tokens_kv`` via
    ``layers_extra_tokens_kv`` — presumably giving every chunk attention
    access to the preceding chunk's context (confirm against the
    transformer's implementation).  Per-chunk output embeddings are
    concatenated back along -2.

    Note: embeddings split on dim=-2, the mask on dim=-1 — assumes the mask's
    last axis indexes the same token positions; TODO confirm with callers.
    """
    emb_chunks = tokens_emb.split(local_tokens_n, dim=-2)
    n_chunks = len(emb_chunks)  # Tensor.split yields ceil(seq / local_tokens_n) pieces.

    if tokens_pos_emb is None:
        pos_chunks = [None] * n_chunks
    else:
        pos_chunks = tokens_pos_emb.split(local_tokens_n, dim=-2)
    if tokens_mask is None:
        mask_chunks = [None] * n_chunks
    else:
        mask_chunks = tokens_mask.split(local_tokens_n, dim=-1)

    out_chunks = []
    carry_kv = None  # KV from the previous chunk's layers; None for the first chunk.
    for emb, pos, mask in zip(emb_chunks, pos_chunks, mask_chunks):
        result = transformer(
            tokens_emb=emb,
            tokens_pos_emb=pos,
            tokens_mask=mask,
            layers_extra_tokens_kv=carry_kv,
            **kwargs)
        out_chunks.append(result.tokens_emb)
        carry_kv = result.layers_tokens_kv

    return ChunkLocalTransformerOutput(tokens_emb=torch.cat(out_chunks, dim=-2))


def ceil_dev(a: int, b: int) -> int:
    """Ceiling integer division: the smallest integer >= a / b.

    Uses ``-(-a // b)``, which — unlike the common ``(a + b - 1) // b``
    trick — is also correct when either operand is negative, because
    Python's ``//`` floors toward negative infinity.  For the positive
    operands used in this module the result is unchanged.

    Raises ZeroDivisionError if ``b`` is 0.
    """
    # NOTE(review): name looks like a typo for "ceil_div"; kept as-is for
    # backward compatibility with existing callers.
    return -(-a // b)
