# SPDX-FileCopyrightText: 2025 MiromindAI
#
# SPDX-License-Identifier: Apache-2.0

import logging
import os
from typing import Dict, Optional

import torch

from flash_attn.flash_attn_interface import (
    flash_attn_func as _flash_attention_call,
    flash_attn_varlen_func as _flash_attention_varlen_call,
)

from mirotrain.modules.ulysses import (
    gather_heads_scatter_seq,
    gather_seq_scatter_heads,
    get_ulysses_sequence_parallel_world_size,
)
from torch import nn
from torchtune.modules import MultiHeadAttention
from torchtune.modules.attention_utils import _MaskType
from torchtune.modules.kv_cache import KVCache

logger = logging.getLogger(__name__)

# Module-level switch controlling whether Flash Attention kernels are used.
# Read via get_use_flash_attention() and mutated via set_use_flash_attention().
_USE_FLASH_ATTENTION = True


def get_use_flash_attention() -> bool:
    """Report the current state of the module-level Flash Attention toggle.

    Returns:
        bool: True if Flash Attention kernels are enabled, False otherwise.
    """
    return _USE_FLASH_ATTENTION


def set_use_flash_attention(use_flash_attention: bool) -> None:
    """Flip the module-level Flash Attention toggle.

    Args:
        use_flash_attention (bool): New value for the toggle; subsequent calls
            to :func:`get_use_flash_attention` will observe it.
    """
    global _USE_FLASH_ATTENTION
    _USE_FLASH_ATTENTION = use_flash_attention


class MultiHeadAttentionWithUlysses(MultiHeadAttention):
    """Multi-headed attention layer with support for grouped query
    attention (GQA) introduced in https://arxiv.org/abs/2305.13245v1.

    Supports ulysses sequence parallel for long sequence.

    GQA is a version of multiheaded attention (MHA) which uses fewer
    key/value heads than query heads by grouping n query heads for each
    key and value head. Multi-Query Attention is an extreme
    version where we have a single key and value head shared by all
    query heads.

    Following is an example of MHA, GQA and MQA with num_heads = 4

    (credit for the documentation:
    `litgpt.Config <https://github.com/Lightning-AI/litgpt/blob/eda1aaaf391fd689664f95487ab03dc137e213fd/litgpt/config.py>`_).


    ::

        ┌───┐┌───┐┌───┐┌───┐     ┌───┐    ┌───┐             ┌───┐
        │ v ││ v ││ v ││ v │     │ v │    │ v │             │ v │
        └───┘└───┘└───┘└───┘     └───┘    └───┘             └───┘
        │    │    │    │         │        │                 │
        ┌───┐┌───┐┌───┐┌───┐     ┌───┐    ┌───┐             ┌───┐
        │ k ││ k ││ k ││ k │     │ k │    │ k │             │ k │
        └───┘└───┘└───┘└───┘     └───┘    └───┘             └───┘
        │    │    │    │      ┌──┴──┐  ┌──┴──┐      ┌────┬──┴─┬────┐
        ┌───┐┌───┐┌───┐┌───┐  ┌───┐┌───┐┌───┐┌───┐  ┌───┐┌───┐┌───┐┌───┐
        │ q ││ q ││ q ││ q │  │ q ││ q ││ q ││ q │  │ q ││ q ││ q ││ q │
        └───┘└───┘└───┘└───┘  └───┘└───┘└───┘└───┘  └───┘└───┘└───┘└───┘
        ◀──────────────────▶  ◀──────────────────▶  ◀──────────────────▶
                MHA                    GQA                   MQA
        n_kv_heads =4          n_kv_heads=2           n_kv_heads=1

    Args:
        embed_dim (int): embedding dimension for the model
        num_heads (int): number of query heads. For MHA this is also the
            number of heads for key and value
        num_kv_heads (int): number of key and value heads. User should ensure
            ``num_heads % num_kv_heads == 0``. For standard MHA set ``num_kv_heads == num_heads``,
            for GQA ``num_kv_heads < num_heads``, and for MQA set ``num_kv_heads == 1``.
        head_dim (int): dimension of each head, calculated by ``embed_dim // num_heads``.
        q_proj (nn.Module): projection layer for query.
        k_proj (nn.Module): projection layer for key.
        v_proj (nn.Module): projection layer for value.
        output_proj (nn.Module): projection layer for output.
        pos_embeddings (Optional[nn.Module]): positional embeddings layer, e.g. RotaryPositionalEmbeddings.
        q_norm (Optional[nn.Module]): normalization layer for query, e.g. RMSNorm. For decoding, this is applied
            before updating from kv_cache. This means it will only support token wide normalization and not
            batch or sequence wide normalization.
        k_norm (Optional[nn.Module]): normalization layer for key, must be set if q_norm is.
        kv_cache (Optional[KVCache]): KVCache object used to cache key and value
        max_seq_len (int): maximum sequence length supported by the model.
            This is needed to compute the RoPE Cache. Default: 4096.
        is_causal (bool): sets the default mask to causal when no mask is provided
        attn_dropout (float): dropout value passed onto the scaled_dot_product_attention function.
            Default value is 0.0.

    """

    def __init__(
        self,
        *,
        embed_dim: int,
        num_heads: int,
        num_kv_heads: int,
        head_dim: int,
        q_proj: nn.Module,
        k_proj: nn.Module,
        v_proj: nn.Module,
        output_proj: nn.Module,
        pos_embeddings: Optional[nn.Module] = None,
        q_norm: Optional[nn.Module] = None,
        k_norm: Optional[nn.Module] = None,
        kv_cache: Optional[KVCache] = None,
        max_seq_len: int = 4096,
        is_causal: bool = True,
        attn_dropout: float = 0.0,
    ) -> None:
        # All configuration and submodule wiring is delegated to the torchtune
        # MultiHeadAttention base class; this subclass only overrides forward().
        super().__init__(
            embed_dim=embed_dim,
            num_heads=num_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_dim,
            q_proj=q_proj,
            k_proj=k_proj,
            v_proj=v_proj,
            output_proj=output_proj,
            pos_embeddings=pos_embeddings,
            q_norm=q_norm,
            k_norm=k_norm,
            kv_cache=kv_cache,
            max_seq_len=max_seq_len,
            is_causal=is_causal,
            attn_dropout=attn_dropout,
        )

    def forward(
        self,
        x: torch.Tensor,
        y: Optional[torch.Tensor] = None,
        *,
        mask: Optional[_MaskType] = None,
        input_pos: Optional[torch.Tensor] = None,
        **kwargs: Dict,
    ) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): input tensor with shape [b x s_x x d] for the query
            y (Optional[torch.Tensor]): second input tensor with shape [b x s_y x d], is the input
                for k and v. For self attention, x=y. Optional only with kv_cache enabled.
            mask (Optional[_MaskType]): Used to mask the scores after the query-key multiplication
                and before the softmax. Either:

                A boolean tensor with shape ``[b x s x s]``, ``[b x s x self.encoder_max_cache_seq_len]``,
                or ``[b x s x self.decoder_max_cache_seq_len]`` if using KV-cacheing with encoder/decoder layers.
                A value of True in row ``i`` and column ``j`` means token ``i`` attends to token ``j``. A value of False means
                token ``i`` does not attend to token ``j``. If no mask is specified, a causal mask
                is used by default.

                A :class:`~torch.nn.attention.flex_attention.BlockMask` for document masking in a packed sequence
                created via `create_block_mask <https://pytorch.org/blog/flexattention/#mask-mods>`_. We  use
                :func:`~torch.nn.attention.flex_attention.flex_attention` when computing attention with block masks.
                Default is None.
            input_pos (Optional[torch.Tensor]): Optional tensor which contains the position ids
                of each token. During training, this is used to indicate the positions
                of each token relative to its sample when packed, shape [b x s].
                During inference, this indicates the position of the current token.
                If none, assume the index of the token is its position id. Default is None.
            **kwargs (Dict): Optional additional arguments. Recognized keys:
                ``cu_seqlens`` and ``max_seqlen`` select the varlen (packed
                sequence) Flash Attention path below.

        Raises:
            ValueError: If no ``y`` input and ``kv_cache`` is not enabled.
            NotImplementedError: If flex_attention is used with ulysses_sp.

        Returns:
            torch.Tensor: output tensor with attention applied

        Notation used for tensor shapes:
            - b: batch size
            - s_x: sequence length for x
            - s_y: sequence length for y
            - n_h: num heads
            - n_kv: num kv heads
            - d: embed dim
            - h_d: head dim
        """
        # NOTE(review): this assert makes the ``y is None`` kv-cache decode
        # branch below unreachable (contradicting the docstring's "Optional
        # only with kv_cache enabled" and the documented ValueError), and it
        # is stripped under ``python -O``. If decode were reached,
        # ``ulysses_sp_size`` and ``full_q_len`` would be unbound — confirm
        # whether streaming decode is intended to be supported here.
        assert y is not None

        # x has shape [b, s_x, d]
        # y has shape [b, s_y, d]
        b, s_x, _ = x.shape
        s_y = y.shape[1] if y is not None else 0

        # q has shape [b, s_x, num_heads * head_dim]
        q = self.q_proj(x)

        # number of queries per key/value
        q_per_kv = self.num_heads // self.num_kv_heads
        # q shape: [b, s_x, n_h, h_d]
        q = q.view(b, s_x, self.num_kv_heads * q_per_kv, self.head_dim)

        if y is not None:
            # k,v shape [b, s_y, num_kv_heads * head_dim]
            k = self.k_proj(y)
            v = self.v_proj(y)

            # Update k and v shape
            # k,v shape: [b, s_y, n_kv, h_d]
            k = k.view(b, s_y, -1, self.head_dim)
            v = v.view(b, s_y, -1, self.head_dim)

        if y is None:
            # Streaming decode: reuse cached keys/values instead of projecting y.
            # (Currently unreachable; see the NOTE(review) on the assert above.)
            if self.kv_cache is None or not self.cache_enabled:
                raise ValueError(
                    "Must provide y input or use kv_cache to enable streaming decoding"
                )
            k = self.kv_cache.k_cache
            v = self.kv_cache.v_cache
        else:
            # AlltoAll for Ulysses
            ulysses_sp_size = get_ulysses_sequence_parallel_world_size()

            if ulysses_sp_size > 1:
                # Replicate kv heads when there are fewer of them than SP ranks,
                # presumably so each rank receives at least one kv head after
                # the head-scatter below — confirm any divisibility
                # requirements are enforced upstream.
                replication_factor = (
                    ulysses_sp_size + self.num_kv_heads - 1
                ) // self.num_kv_heads
                if replication_factor > 1:
                    k = torch.repeat_interleave(k, repeats=replication_factor, dim=2)
                    v = torch.repeat_interleave(v, repeats=replication_factor, dim=2)
                # All-to-all: trade the sequence shard for a head shard.
                # (bsz, seq_len/n, n_head, head_dim) -> (bsz, seq_len, n_head/n, head_dim)
                # [b, s, n_h, h_d]
                q = gather_seq_scatter_heads(q, seq_dim=1, head_dim=2)
                k = gather_seq_scatter_heads(k, seq_dim=1, head_dim=2)
                v = gather_seq_scatter_heads(v, seq_dim=1, head_dim=2)

            full_q_len = q.size(1)  # full seq length
            shard_q_num_heads = q.size(2)
            shard_kv_num_heads = k.size(2)

            # Apply positional embeddings
            if self.pos_embeddings is not None:
                q = self.pos_embeddings(q, input_pos=input_pos)

            # [b, n_h, s_x, h_d]
            q = q.transpose(1, 2)

            # Normalize q
            if self.q_norm is not None:
                q = self.q_norm(q)

            # Apply k,v positional embeddings, and normalization
            if self.pos_embeddings is not None:
                k = self.pos_embeddings(k, input_pos=input_pos)

            # k,v shape: [b, n_kv, s_y, h_d]
            k = k.transpose(1, 2)
            v = v.transpose(1, 2)

            # Normalize k
            if self.k_norm is not None:
                k = self.k_norm(k)

            # Update key-value cache
            if self.kv_cache is not None and self.cache_enabled:
                k, v = self.kv_cache.update(k, v)

        # Attention dispatch: varlen Flash Attention (packed sequences),
        # dense Flash Attention, or the torchtune fallback (_attention_call).
        if "cu_seqlens" in kwargs:
            # Packed-sequence path: caller supplies cumulative sequence
            # boundaries. The same cu_seqlens/max_seqlen are used for both
            # q and k (self-attention over the packed batch).
            assert get_use_flash_attention(), "Flash Attention is not enabled"
            cu_seqlens_q = kwargs["cu_seqlens"]
            cu_seqlens_k = kwargs["cu_seqlens"]
            max_seqlen_q = kwargs["max_seqlen"]
            max_seqlen_k = kwargs["max_seqlen"]
            # reshape the q, k, v to [b, s, n_h, h_d] and squeeze the batch dimension
            # NOTE(review): squeeze(0) assumes b == 1 for packed varlen input
            # (squeeze is a no-op otherwise and shapes would be wrong) — confirm.
            q = q.transpose(1, 2).contiguous().squeeze(0)
            k = k.transpose(1, 2).contiguous().squeeze(0)
            v = v.transpose(1, 2).contiguous().squeeze(0)
            # NOTE(review): dropout_p and causal are hardcoded here;
            # self.attn_dropout, mask, and self.is_causal are ignored on the
            # flash paths — confirm this is intentional for training.
            output = _flash_attention_varlen_call(
                q,
                k,
                v,
                cu_seqlens_q,
                cu_seqlens_k,
                max_seqlen_q,
                max_seqlen_k,
                dropout_p=0.0,
                causal=True,
                # Opt-in deterministic backward via env var (string "True").
                deterministic=os.getenv("flash_attn_deterministic", "False") == "True",
            )
            # Restore [b, full_q_len, n_h * h_d] for the output projection.
            output = output.unsqueeze(0).view(b, full_q_len, -1)
        elif get_use_flash_attention():
            # Dense Flash Attention path; flash_attn_func handles GQA
            # natively, so no kv-head expansion is needed here.
            # reshape the q, k, v to [b, s, n_h, h_d]
            q = q.transpose(1, 2).contiguous()
            k = k.transpose(1, 2).contiguous()
            v = v.transpose(1, 2).contiguous()
            # NOTE(review): dropout_p=0.0 and causal=True hardcoded, as above.
            output = _flash_attention_call(
                q,
                k,
                v,
                dropout_p=0.0,
                causal=True,
            )
            output = output.view(b, full_q_len, -1)
        else:
            # If needed, expand the key and value tensors to have the same shape
            # as the query tensor by copying values across the relevant dim
            # k,v shape: [b, n_kv, s, h_d] -> [b, n_h, s, h_d]
            if ulysses_sp_size > 1:
                raise NotImplementedError(
                    "flex_attention with ulysses_sp is not tested"
                )
            if self.num_heads != self.num_kv_heads:
                expand_shape = (b, shard_kv_num_heads, q_per_kv, -1, self.head_dim)
                k = k.unsqueeze(2).expand(expand_shape).flatten(1, 2)
                v = v.unsqueeze(2).expand(expand_shape).flatten(1, 2)

            # Fallback attention (SDPA / flex_attention) from the torchtune base class.
            output = self._attention_call(
                q,
                k,
                v,
                mask=mask,
                dropout_p=self.attn_dropout if self.training else 0.0,
                is_causal=self.kv_cache is None and mask is None and self.is_causal,
            )

            # reshape the output to be the same shape as the input
            output = output.transpose(1, 2).contiguous().view(b, full_q_len, -1)

        # AlltoAll for Ulysses
        # Inverse all-to-all: gather the head shards back and re-scatter the
        # sequence dimension so each rank holds its original sequence shard.
        if ulysses_sp_size > 1:
            output = gather_heads_scatter_seq(output, seq_dim=1, head_dim=2)

        return self.output_proj(output)
