# Copyright (c) Microsoft Corporation and Jiarui Fang
# Copyright (c) 2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team & Jiarui Fang
# modified from https://github.com/feifeibear/long-context-attention/blob/main/yunchang/ulysses/attn_layer.py

from typing import Callable, Optional

import torch
import torch.distributed as dist

from openmind.flow.model.sequence_parallel.seq_comm import SeqAllToAll4D


class UlyssesAttention(torch.nn.Module):
    """DeepSpeed-Ulysses sequence-parallel attention wrapper.

    Inputs arrive sharded along the sequence dimension (each rank holds
    ``seq_len / N`` tokens with all heads). An all-to-all over the sequence
    process group re-shards them along the head dimension (full sequence,
    ``head_cnt / N`` heads), the wrapped attention function runs locally,
    and a second all-to-all restores the original sequence sharding.

    Args:
        sequence_process_group: process group the all-to-all communication
            runs over; ``None`` falls through to ``SeqAllToAll4D``'s handling
            of a default group.
        scatter_idx: tensor dim scattered by the forward all-to-all
            (default 2 — the head dim of the (bs, seq, head, hs) layout).
        gather_idx: tensor dim gathered by the forward all-to-all
            (default 1 — the sequence dim).
        attn_fn: local attention callable invoked as
            ``attn_fn(q, k, v, dropout_p=..., scale=..., is_causal=...)``
            (the signature of ``torch.nn.functional.scaled_dot_product_attention``).
        is_causal: whether the wrapped attention applies a causal mask.
            Defaults to ``True``, matching the previously hard-coded value,
            so existing callers are unaffected.
    """

    def __init__(
        self,
        sequence_process_group: dist.ProcessGroup = None,
        scatter_idx: int = 2,
        gather_idx: int = 1,
        attn_fn: Optional[Callable] = None,
        is_causal: bool = True,
    ) -> None:
        super().__init__()
        self.spg = sequence_process_group
        self.scatter_idx = scatter_idx
        self.gather_idx = gather_idx
        self.attn_fn = attn_fn
        self.is_causal = is_causal

    def forward(
        self,
        query,
        key,
        value,
        dropout,
        scaling,
        *args,
    ):
        """Run sequence-parallel attention over sequence-sharded q/k/v.

        Args:
            query, key, value: tensors of shape
                (bs, head_cnt, seq_len/N, head_size) — sequence-sharded,
                heads-first layout.
            dropout: dropout probability forwarded to ``attn_fn`` as
                ``dropout_p``.
            scaling: attention scale forwarded as ``scale``; when ``None``,
                the standard ``1/sqrt(head_size)`` is used.
            *args: ignored; kept for signature compatibility with callers
                that pass extra positional arguments.

        Returns:
            Tensor of shape (bs, head_cnt, seq_len/N, head_size) — same
            layout as the inputs.
        """
        # (bs, head_cnt, seq_len/N, head_size) -> (bs, seq_len/N, head_cnt, head_size)
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        # All-to-all: trade sequence sharding for head sharding.
        # (bs, seq_len/N, head_cnt, head_size) -> (bs, seq_len, head_cnt/N, head_size)
        q = SeqAllToAll4D.apply(self.spg, query, self.scatter_idx, self.gather_idx)
        k = SeqAllToAll4D.apply(self.spg, key, self.scatter_idx, self.gather_idx)
        v = SeqAllToAll4D.apply(self.spg, value, self.scatter_idx, self.gather_idx)

        # Back to heads-first for the attention kernel; contiguous() because
        # attention implementations commonly require densely laid-out inputs.
        # (bs, seq_len, head_cnt/N, head_size) -> (bs, head_cnt/N, seq_len, head_size)
        q = q.transpose(1, 2).contiguous()
        k = k.transpose(1, 2).contiguous()
        v = v.transpose(1, 2).contiguous()

        if scaling is None:
            scaling = q.shape[-1] ** -0.5

        context_layer = self.attn_fn(
            q,
            k,
            v,
            dropout_p=dropout,
            scale=scaling,
            is_causal=self.is_causal,
        )

        # Some attention fns return (output, aux...); keep only the output.
        if isinstance(context_layer, tuple):
            context_layer = context_layer[0]
        context_layer = context_layer.transpose(1, 2)

        # Inverse all-to-all: restore sequence sharding, regather all heads.
        # (bs, seq_len, head_cnt/N, head_size) -> (bs, seq_len/N, head_cnt, head_size)
        output = SeqAllToAll4D.apply(self.spg, context_layer, self.gather_idx, self.scatter_idx)
        output = output.transpose(1, 2).contiguous()
        return output
