from dataclasses import dataclass
from typing import Literal

import torch
from zkl_llmpt_llama3 import Llama3Transformer, Llama3TransformerHparams, compute_periods_cs
from zkl_llmpt_synergy import SynergyRouter, SynergyTransformer, make_sigma_positioner, none_positioner, \
    original_positioner

from .chunk_local import ChunkLocalTransformer
from .transformer import Llama3SynergySubTransformer

# Named presets for positional-encoding strategy; resolved to a concrete
# positioner callable in construct_llama3_synergy_transformer. The 'sigma*'
# variants differ in whether gradients flow through the encoding ('_grad')
# and whether it applies beyond the middle transformer ('_all').
PositionerPreset = Literal['none', 'original', 'sigma', 'sigma_grad', 'sigma_all', 'sigma_all_grad']


@dataclass(kw_only=True)
class Llama3SynergyTransformerHparams:
    """Hyperparameters for a three-stage (encoder/middle/decoder) synergy transformer.

    Each stage is an independent Llama3 transformer with its own hparams;
    `positioner` selects how positional information is injected (see
    PositionerPreset).
    """
    # Hparams for the encoder stage (wrapped in a ChunkLocalTransformer at build time).
    encoder: Llama3TransformerHparams
    # Hparams for the middle stage; its qk_size also sizes the sigma positional encoding.
    middle: Llama3TransformerHparams
    # Hparams for the decoder stage (wrapped in a ChunkLocalTransformer at build time).
    decoder: Llama3TransformerHparams
    # Positional-encoding preset; defaults to no positional encoding.
    positioner: PositionerPreset = 'none'


def construct_llama3_synergy_transformer(
    hparams: Llama3SynergyTransformerHparams,
    rope_period_min: float | None = None,
    rope_period_max: float | None = None,
    dtype: torch.dtype | None = None,
    device: torch.device | None = None
) -> SynergyTransformer:
    """Assemble a SynergyTransformer from three Llama3 sub-transformers.

    Builds encoder/middle/decoder Llama3 stages, adapts each into a synergy
    sub-transformer, restricts encoder and decoder to chunk-local attention,
    and resolves the positioner preset named in ``hparams.positioner``.

    Args:
        hparams: stage hparams plus the positioner preset.
        rope_period_min: lower period bound forwarded to compute_periods_cs
            for the 'sigma*' presets.
        rope_period_max: upper period bound forwarded to compute_periods_cs
            for the 'sigma*' presets.
        dtype: parameter dtype for all constructed modules.
        device: parameter device for all constructed modules.

    Returns:
        The fully wired SynergyTransformer.

    Raises:
        ValueError: if ``hparams.positioner`` is not a known preset.
    """
    # NOTE: the three Llama3Transformer constructors run in this exact order
    # (encoder, decoder, middle) so that global-RNG-driven weight init stays
    # reproducible relative to prior behavior.
    encoder = Llama3Transformer(hparams=hparams.encoder, dtype=dtype, device=device)
    decoder = Llama3Transformer(hparams=hparams.decoder, dtype=dtype, device=device)
    middle = Llama3Transformer(hparams=hparams.middle, dtype=dtype, device=device)

    # Adapt each stage to the synergy sub-transformer interface.
    encoder = Llama3SynergySubTransformer(encoder)
    decoder = Llama3SynergySubTransformer(decoder)
    middle = Llama3SynergySubTransformer(middle)

    # Encoder and decoder attend only within local chunks; the middle stage
    # is left global.
    encoder = ChunkLocalTransformer(encoder)
    decoder = ChunkLocalTransformer(decoder)

    router = SynergyRouter(dtype=dtype, device=device)

    preset = hparams.positioner
    if preset == 'none':
        positioner = none_positioner
    elif preset == 'original':
        positioner = original_positioner
    elif preset in ('sigma', 'sigma_grad', 'sigma_all', 'sigma_all_grad'):
        # Sigma presets encode positions via cos/sin periods sized to the
        # middle stage's query/key dimension.
        def sigma_encoder(pos: torch.Tensor) -> torch.Tensor:
            return compute_periods_cs(
                pos=pos, size=hparams.middle.qk_size,
                period_min=rope_period_min,
                period_max=rope_period_max,
                dtype=dtype, device=device)

        positioner = make_sigma_positioner(
            positional_encoder=sigma_encoder,
            # '_grad' variants let gradients flow through the encoding.
            keep_gradient=preset in ('sigma_grad', 'sigma_all_grad'),
            # Plain 'sigma'/'sigma_grad' apply only to the middle stage.
            only_middle=preset in ('sigma', 'sigma_grad'))
    else:
        raise ValueError(f'Unknown positioner: {preset}')

    return SynergyTransformer(
        encoder=encoder, middle=middle, decoder=decoder,
        router=router, positioner=positioner)
