# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.


import math
from typing import Optional

import torch
from torch import Tensor

from megatron.core import parallel_state, tensor_parallel
from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.utils import attention_mask_func
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.utils import divide

from megatron_patch.model.llama2.rotary_pos_embedding_fuse import RotaryEmbedding as RotaryEmbedding_Fuse
try:
    from einops import rearrange
except ImportError:
    rearrange = None

# Try FlashAttn2 first
try:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_varlen_qkvpacked_func
    from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_unpadded_func
except ImportError:
        flash_attn_varlen_qkvpacked_func = None
        flash_attn_unpadded_func = None

try:
    import flash_fusion
    from megatron_patch.model.llama2.rotary_pos_embedding_flash import RotaryEmbedding as RotaryEmbeddingFlash
except ImportError:
    print('flash fusion is not available, please install it')

def get_current_device() -> torch.device:
    """Return the device computation should run on.

    Prefers the currently selected CUDA device; falls back to CPU when
    CUDA is unavailable.
    """
    if not torch.cuda.is_available():
        return torch.device("cpu")
    return torch.device(f"cuda:{torch.cuda.current_device()}")


class FlashSelfAttention_packed(torch.nn.Module):
    """Scaled dot-product attention via FlashAttention's varlen QKV-packed kernel.

    Arguments
    ---------
        causal: whether to apply a causal attention mask.
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
        micro_batch_size: number of equal-length sequences packed into the
                          flat (b*s) leading dimension of the qkv input.
        deterministic_mode: request a deterministic backward pass from flash-attn.
        device, dtype: accepted for interface compatibility; unused.
    """
    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, micro_batch_size = None, deterministic_mode = False,
                 device=None, dtype=None):
        super().__init__()
        assert flash_attn_varlen_qkvpacked_func is not None, ('Please install FlashAttention first, '
                                                      'e.g., with pip install flash-attn')
        assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout
        self.micro_batch_size = micro_batch_size
        self.deterministic_mode = deterministic_mode
        # Megatron global args, fetched lazily on first forward() and cached
        # (get_args() returns a process-wide singleton, so one fetch suffices).
        self.args = None

    def forward(self, qkv):
        """Implements the multihead softmax attention.

        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. [(b s) three h d]

        Returns the attention output reshaped to [b, s, h, d].
        """
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        # All sequences have identical length, so cumulative sequence
        # boundaries are simply multiples of seq_len.
        seq_len = qkv.shape[0] // self.micro_batch_size
        cu_seqlens = torch.arange(0, (self.micro_batch_size + 1) * seq_len, step=seq_len, dtype=torch.int32,
                                    device=qkv.device)

        # Dropout is only active in training; inference runs with p=0.
        is_causal = self.causal
        dropout_p = self.dropout_p if self.training else 0

        if self.args is None:
            # Deferred import keeps this module importable without a fully
            # initialized megatron runtime.
            from megatron.training import get_args
            self.args = get_args()

        if self.args.yd_alibi:
            # NOTE(review): all-zero slopes make the alibi bias a no-op;
            # presumably a placeholder — confirm the intended slope values.
            yd_alibi_slopes = torch.zeros((self.micro_batch_size, qkv.shape[-2] // self.micro_batch_size), device=qkv.device, dtype=torch.float32)
            output = flash_attn_varlen_qkvpacked_func(
                qkv, cu_seqlens, seq_len, dropout_p,
                softmax_scale=self.softmax_scale, alibi_slopes=yd_alibi_slopes, causal=is_causal,
                # Pass deterministic here too, consistent with the non-alibi
                # branch below (previously omitted in this branch only).
                deterministic=self.deterministic_mode,
            )
        else:
            output = flash_attn_varlen_qkvpacked_func(
                qkv, cu_seqlens, seq_len, dropout_p,
                softmax_scale=self.softmax_scale, causal=is_causal, deterministic = self.deterministic_mode,
            )

        # [(b s), h, d] -> [b, s, h, d]
        output = rearrange(output, '(b s) ... -> b s ...', b=self.micro_batch_size)
        return output

class FlashSelfAttention_unpacked(torch.nn.Module):
    """Scaled dot-product attention with softmax, backed by FlashAttention's
    unpadded (varlen) kernel on separate q/k/v tensors.

    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """
    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0, deterministic_mode = False,
                 device=None, dtype=None):
        super().__init__()
        assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
                                                      'e.g., with pip install flash-attn')
        assert rearrange is not None, 'Please install einops first, e.g., with pip install einops'
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout
        self.deterministic_mode = deterministic_mode

    def forward(self, q, k, v):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            q, k, v: The tensor containing the query, key, and value. (B, S, H, D)
        """
        for t in (q, k, v):
            assert t.dtype in [torch.float16, torch.bfloat16]
            assert t.is_cuda

        batch, len_q = q.shape[0], q.shape[1]
        len_k = k.shape[1]

        # Flatten batch and sequence dims: (b, s, ...) -> ((b s), ...)
        q = rearrange(q, 'b s ... -> (b s) ...')
        k = rearrange(k, 'b s ... -> (b s) ...')
        v = rearrange(v, 'b s ... -> (b s) ...')

        def cu_seqlens_for(seqlen):
            # Cumulative sequence boundaries for uniform-length sequences.
            return torch.arange(0, (batch + 1) * seqlen, step=seqlen,
                                dtype=torch.int32, device=q.device)

        cu_seqlens_q = cu_seqlens_for(len_q)

        if self.training:
            # during training q,k,v always have same seqlen
            assert len_k == len_q
            is_causal = self.causal
            cu_seqlens_k = cu_seqlens_q
            dropout_p = self.dropout_p
        else:
            # turn off FA causal mask after first inference autoregressive iteration
            # only on first autoregressive step q,k,v have same seqlen
            is_causal = len_q == len_k
            cu_seqlens_k = cu_seqlens_for(len_k)
            dropout_p = 0

        out = flash_attn_unpadded_func(
            q, k, v, cu_seqlens_q, cu_seqlens_k, len_q, len_k,
            dropout_p,
            softmax_scale=self.softmax_scale, causal=is_causal,
            deterministic=self.deterministic_mode,
        )

        # ((b s), ...) -> (b, s, ...)
        return rearrange(out, '(b s) ... -> b s ...', b=batch)

class DotProductAttention(MegatronModule):
    """
    Region where selective activation recomputation is applied.
    This region is memory intensive but less compute intensive which
    makes activation checkpointing more efficient for LLMs (20B+).
    See Reducing Activation Recomputation in Large Transformer Models:
    https://arxiv.org/abs/2205.05198 for more details.

    We use the following notation:
     h: hidden size
     n: number of attention heads
     p: number of tensor model parallel partitions
     b: batch size
     s: sequence length
    """

    def __init__(
        self,
        config: TransformerConfig,
        layer_number: int,
        attn_mask_type: AttnMaskType,
        attention_type: str,
        attention_dropout: float = None,
        softmax_scale: float = None,
        cp_comm_type: str = None,
    ):
        """Set up the flash-attention kernels and the fused-softmax fallback.

        Args:
            config: transformer configuration (parallel sizes, dropout, dtypes).
            layer_number: layer index, clamped to >= 1; used as the
                query/key layer-scaling coefficient when
                config.apply_query_key_layer_scaling is set.
            attn_mask_type: attention mask type; the flash path requires causal.
            attention_type: 'self' or 'cross'; the flash path requires 'self'.
            attention_dropout: overrides config.attention_dropout when not None.
            softmax_scale: overrides the default 1/sqrt(head_dim) when not None.
            cp_comm_type: unused here; context parallelism is unsupported.
        """
        super().__init__(config=config)

        self.config: TransformerConfig = config

        assert (
            self.config.context_parallel_size == 1
        ), "Context parallelism is only supported by TEDotProductAttention!"

        assert (
            self.config.window_size is None
        ), "Sliding Window Attention is only supported by TEDotProductAttention!"
        # Global megatron args drive the rotary-embedding implementation choice
        # and the flash-attention toggle below.
        from megatron.training import get_args
        self.args = get_args()
        self.seq_length = self.args.seq_length
        self.micro_batch_size = self.args.micro_batch_size
        self.head_dim = self.args.hidden_size // self.args.num_attention_heads
        if self.args.use_rotary_emb_implement == 'flash_attn': 
            # The fused rotary kernel rotates the whole head dim, so partial
            # rotary (rotary_percent < 1.0) is not supported on this path.
            assert self.args.rotary_percent == 1.0, ('rotary_emb implemented by flash attn only supported rotary_percent = 1.0')
            self.rotary_emb = RotaryEmbedding_Fuse(self.head_dim, device=get_current_device())

        # Flash attention is only used for causal self-attention.
        self.use_flash_attn = self.args.use_flash_attn \
            and attention_type == 'self' \
            and attn_mask_type == AttnMaskType.causal

        self.layer_number = max(1, layer_number)
        self.attn_mask_type = attn_mask_type
        self.attention_type = attention_type  # unused for now

        projection_size = self.config.kv_channels * self.config.num_attention_heads

        # Per attention head and per partition values.
        world_size = parallel_state.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = divide(projection_size, world_size)
        self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)
        self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size)
        self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size)

        coeff = None
        if softmax_scale is None:
            self.softmax_scale = 1.0 / math.sqrt(self.hidden_size_per_attention_head)
        else:
            self.softmax_scale = softmax_scale

        if self.config.apply_query_key_layer_scaling:
            # Scale attention scores down by the layer index; the fused
            # softmax scales back up by `coeff` (see scale= below).
            coeff = self.layer_number
            self.softmax_scale /= coeff

        if self.use_flash_attn:
            if flash_attn_varlen_qkvpacked_func is None:
                raise ImportError('FlashAttention is not installed, please install with '
                                  'pip install flash-attn')
            assert attention_type == 'self', ('FlashAttention code path only supports '
                                                          'self-attention for now')
            assert attn_mask_type == AttnMaskType.causal, ('FlashAttention code path only '
                                                                'supports causal mask for now')

            # The packed kernel consumes one [(b s), 3, h, d] tensor (rotary
            # applied via flash_attn); the unpacked kernel takes separate
            # q/k/v (rotary applied via apex upstream of this module).
            if self.args.use_rotary_emb_implement == 'flash_attn':        
                self.core_attention_flash = FlashSelfAttention_packed(causal=True, softmax_scale=self.softmax_scale, attention_dropout=self.config.attention_dropout,
                    micro_batch_size = self.micro_batch_size, deterministic_mode = self.config.deterministic_mode)
            elif self.args.use_rotary_emb_implement == 'apex':
                self.core_attention_flash = FlashSelfAttention_unpacked(
                    causal=True, softmax_scale=self.softmax_scale, attention_dropout=config.attention_dropout, deterministic_mode = self.config.deterministic_mode)
            else:
                self.core_attention_flash = None


        # Fallback (non-flash) softmax; also built when flash is enabled,
        # but only used on the non-flash path of forward().
        self.scale_mask_softmax = FusedScaleMaskSoftmax(
            input_in_fp16=self.config.fp16,
            input_in_bf16=self.config.bf16,
            attn_mask_type=self.attn_mask_type,
            scaled_masked_softmax_fusion=self.config.masked_softmax_fusion,
            mask_func=attention_mask_func,
            softmax_in_fp32=self.config.attention_softmax_in_fp32,
            scale=coeff,
        )

        # Dropout. Note that for a single iteration, this layer will generate
        # different outputs on different number of parallel partitions but
        # on average it should not be partition dependent.
        self.attention_dropout = torch.nn.Dropout(
            self.config.attention_dropout if attention_dropout is None else attention_dropout
        )

    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        attention_mask: Tensor,
        attn_mask_type: AttnMaskType = None,
        attention_bias: Tensor = None,
        packed_seq_params: Optional[PackedSeqParams] = None,
    ):
        """Compute attention over [s, b, h, d]-shaped query/key/value.

        Returns the context layer shaped [s, b, hidden_per_partition].
        `attn_mask_type` is accepted for interface compatibility but unused.
        """
        assert packed_seq_params is None, (
            "Packed sequence is not supported by DotProductAttention."
            "Please use TEDotProductAttention instead."
        )
        assert attention_bias is None, "Attention bias is not supported for DotProductAttention."

        # ===================================
        # Raw attention scores. [b, n/p, s, s]
        # ===================================

        # expand the key and value [sk, b, ng, hn] -> [sk, b, np, hn]
        # This is a noop for normal attention where ng == np. When using group query attention this
        # creates a view that has the keys and values virtually repeated along their dimension to
        # match the number of queries.

        # attn_mask_type is not used.
        # if self.num_attention_heads_per_partition // self.num_query_groups_per_partition > 1:
        #     key = key.repeat_interleave(
        #         self.num_attention_heads_per_partition // self.num_query_groups_per_partition, dim=2
        #     )
        #     value = value.repeat_interleave(
        #         self.num_attention_heads_per_partition // self.num_query_groups_per_partition, dim=2
        #     )
        if self.args.use_rotary_emb_implement == 'flash_attn':
            # GQA: materialize repeated k/v heads (expand + contiguous rather
            # than repeat_interleave) so the packed qkv stack below lines up.
            sk, b, ng, hn = key.shape
            repeat = self.num_attention_heads_per_partition // self.num_query_groups_per_partition
            if repeat > 1:
                key = key.view(sk, b, ng, 1, hn).expand(sk, b, ng, repeat,hn).contiguous().view(sk, b, repeat * ng, hn)
                value = value.view(sk, b, ng, 1, hn).expand(sk, b, ng, repeat,hn).contiguous().view(sk, b, repeat * ng, hn)
        

        # NOTE(review): this condition duplicates the one directly above;
        # the two bodies could share a single branch.
        if self.args.use_rotary_emb_implement == 'flash_attn':
            # Pack q/k/v into [(b s), 3, h, d] and apply the fused rotary
            # embedding in place of the apex implementation.
            qkv = torch.stack([query, key, value], dim=2)
            qkv = rearrange(qkv, "s b three h d -> (b s) three h d")
            if self.args.position_embedding_type == 'rope':
                indexes = torch.arange(key.shape[0]*key.shape[1])
                kwargs = {"arg3":3, "indexes":indexes}
                if not self.args.yd_alibi:
                    qkv = self.rotary_emb(qkv, **kwargs)
                # NOTE(review): kwargs is local and unused afterwards; this
                # pop has no effect.
                kwargs.pop("indexes")
            if not self.use_flash_attn:
                # Unpack back to separate tensors for the non-flash path.
                qkv = rearrange(qkv, "(b s) three h d -> s b three h d", b = self.micro_batch_size)
                query = qkv[:,:,0,:,:]
                key = qkv[:,:,1,:,:]
                value = qkv[:,:,2,:,:]
        
        if self.use_flash_attn:
            if self.args.use_rotary_emb_implement == 'flash_attn':
                # Fork the RNG tracker so attention dropout stays consistent
                # across tensor-parallel ranks (not needed under sequence
                # parallelism, where ranks already hold distinct states).
                if not self.config.sequence_parallel:
                    with tensor_parallel.get_cuda_rng_tracker().fork():
                        context_layer = self.core_attention_flash(qkv)
                else:
                    context_layer = self.core_attention_flash(qkv)
                context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous()

                return context_layer #[s, b, h]

            elif self.args.use_rotary_emb_implement == 'apex':
                q, k, v = [rearrange(x, 's b ... -> b s ...')
                            for x in (query, key, value)]
                # GQA-expanded k/v are already contiguous from the expand()
                # above; only force contiguity in the non-GQA case.
                k = k.contiguous() if self.num_attention_heads_per_partition // self.num_query_groups_per_partition <= 1 else k
                v = v.contiguous() if self.num_attention_heads_per_partition // self.num_query_groups_per_partition <= 1 else v
                q = q.contiguous() if self.args.position_embedding_type != 'rope' else q
                if not self.config.sequence_parallel:
                    with tensor_parallel.get_cuda_rng_tracker().fork():
                        context_layer = self.core_attention_flash(q, k, v)
                else:
                    context_layer = self.core_attention_flash(q, k, v)

                context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous()

                return context_layer #[s, b, h]

            else:
                raise Exception('Only apex and flash_attn implement are curently supported')

        # ---- Non-flash fallback: explicit baddbmm + softmax + bmm ----

        # [b, np, sq, sk]
        output_size = (query.size(1), query.size(2), query.size(0), key.size(0))

        # [sq, b, np, hn] -> [sq, b * np, hn]
        # This will be a simple view when doing normal attention, but in group query attention
        # the key and value tensors are repeated to match the queries so you can't use
        # simple strides to extract the queries.
        query = query.reshape(output_size[2], output_size[0] * output_size[1], -1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key = key.view(output_size[3], output_size[0] * output_size[1], -1)

        # preallocting input tensor: [b * np, sq, sk]
        matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor(
            (output_size[0] * output_size[1], output_size[2], output_size[3]), query.dtype, "mpu"
        )

        # Raw attention scores. [b * np, sq, sk]
        matmul_result = torch.baddbmm(
            matmul_input_buffer,
            query.transpose(0, 1),  # [b * np, sq, hn]
            key.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
            beta=0.0,
            alpha=self.softmax_scale,
        )

        # change view to [b, np, sq, sk]
        attention_scores = matmul_result.view(*output_size)

        # ===========================
        # Attention probs and dropout
        # ===========================

        # attention scores and attention mask [b, np, sq, sk]
        attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.

        if not self.config.sequence_parallel:
            with tensor_parallel.get_cuda_rng_tracker().fork():
                attention_probs = self.attention_dropout(attention_probs)
        else:
            attention_probs = self.attention_dropout(attention_probs)

        # =========================
        # Context layer. [sq, b, hp]
        # =========================

        # value -> context layer.
        # [sk, b, np, hn] --> [b, np, sq, hn]

        # context layer shape: [b, np, sq, hn]
        output_size = (value.size(1), value.size(2), query.size(0), value.size(3))

        # change view [sk, b * np, hn]
        value = value.view(value.size(0), output_size[0] * output_size[1], -1)

        # change view [b * np, sq, sk]
        attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

        # matmul: [b * np, sq, hn]
        context = torch.bmm(attention_probs, value.transpose(0, 1))

        # change view [b, np, sq, hn]
        context = context.view(*output_size)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context = context.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_shape = context.size()[:-2] + (self.hidden_size_per_partition,)
        context = context.view(*new_context_shape)

        return context


class DotProductAttention_QKVPacked(MegatronModule):
    """
    Flash-attention-only attention that consumes a packed QKV projection
    ([sq, b, hp]) instead of separate query/key/value tensors.

    Region where selective activation recomputation is applied.
    This region is memory intensive but less compute intensive which
    makes activation checkpointing more efficient for LLMs (20B+).
    See Reducing Activation Recomputation in Large Transformer Models:
    https://arxiv.org/abs/2205.05198 for more details.

    We use the following notation:
     h: hidden size
     n: number of attention heads
     p: number of tensor model parallel partitions
     b: batch size
     s: sequence length
    """

    def __init__(
        self,
        config: TransformerConfig,
        layer_number: int,
        attn_mask_type: AttnMaskType,
        attention_type: str,
        attention_dropout: float = None,
        softmax_scale: float = None,
        cp_comm_type: str = None,
    ):
        """Set up the packed flash-attention kernel and rotary embedding.

        Args:
            config: transformer configuration (parallel sizes, dropout, dtypes).
            layer_number: layer index, clamped to >= 1; used as the
                query/key layer-scaling coefficient when
                config.apply_query_key_layer_scaling is set.
            attn_mask_type: must be AttnMaskType.causal.
            attention_type: must be 'self'.
            attention_dropout: accepted for interface compatibility; the
                flash kernel uses config.attention_dropout.
            softmax_scale: overrides the default 1/sqrt(head_dim) when not None.
            cp_comm_type: unused here; context parallelism is unsupported.
        """
        super().__init__(config=config)

        self.config: TransformerConfig = config

        assert (
            self.config.context_parallel_size == 1
        ), "Context parallelism is only supported by TEDotProductAttention!"

        assert (
            self.config.window_size is None
        ), "Sliding Window Attention is only supported by TEDotProductAttention!"
        from megatron.training import get_args
        self.args = get_args()
        self.seq_length = self.args.seq_length
        self.micro_batch_size = self.args.micro_batch_size
        self.head_dim = self.args.hidden_size // self.args.num_attention_heads
        assert self.args.rotary_percent == 1.0, ('rotary_emb implemented by flash attn only supported rotary_percent = 1.0')

        # Fix: store the layer index (mirroring DotProductAttention). It is
        # read below for query/key layer scaling; previously it was never
        # assigned, so enabling config.apply_query_key_layer_scaling raised
        # AttributeError.
        self.layer_number = max(1, layer_number)

        if self.config.use_flash_fusion:
            self.rotary_emb_flash = RotaryEmbeddingFlash(self.head_dim)
        else:
            self.rotary_emb_flash = RotaryEmbedding_Fuse(self.head_dim, device=get_current_device())

        try:
            from flash_fusion import FusedRepeatKV
            self.repeat_stack_func = FusedRepeatKV.apply
        # Narrowed from a bare `except:`; only a missing kernel is expected here.
        except ImportError:
            self.repeat_stack_func = None

        projection_size = self.config.kv_channels * self.config.num_attention_heads

        # Per attention head and per partition values.
        world_size = parallel_state.get_tensor_model_parallel_world_size()
        self.hidden_size_per_partition = divide(projection_size, world_size)
        self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)
        self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size)
        self.num_query_groups_per_partition = divide(self.config.num_query_groups, world_size)

        if softmax_scale is None:
            self.softmax_scale = 1.0 / math.sqrt(self.hidden_size_per_attention_head)
        else:
            self.softmax_scale = softmax_scale

        if self.config.apply_query_key_layer_scaling:
            # Scale attention scores down by the layer index.
            coeff = self.layer_number
            self.softmax_scale /= coeff

        self.use_flash_attn = self.args.use_flash_attn \
            and attention_type == 'self' \
            and attn_mask_type == AttnMaskType.causal

        assert self.use_flash_attn, ('DotProductAttention_QKVPacked only support flash_attn')

        if flash_attn_varlen_qkvpacked_func is None:
            raise ImportError('FlashAttention is not installed, please install with '
                                'pip install flash-attn')
        assert attention_type == 'self', ('FlashAttention code path only supports '
                                                        'self-attention for now')
        assert attn_mask_type == AttnMaskType.causal, ('FlashAttention code path only '
                                                            'supports causal mask for now')

        self.core_attention_flash = FlashSelfAttention_packed(causal=True, softmax_scale=self.softmax_scale, attention_dropout=self.config.attention_dropout,
                micro_batch_size = self.micro_batch_size, deterministic_mode = self.config.deterministic_mode)

    def forward(
        self,
        qkv: Tensor,
        attention_mask: Tensor,
        attn_mask_type: AttnMaskType = None,
        attention_bias: Tensor = None,
        packed_seq_params: Optional[PackedSeqParams] = None,
    ):
        """Compute flash attention over a packed QKV projection.

        Args:
            qkv: packed projection output, [sq, b, hp].
            attention_mask: unused on the flash path (causal mask is applied
                inside the kernel).
            attn_mask_type, attention_bias, packed_seq_params: interface
                compatibility only; bias and packed sequences are rejected.

        Returns the context layer shaped [s, b, hidden_per_partition].
        """
        assert packed_seq_params is None, (
            "Packed sequence is not supported by DotProductAttention."
            "Please use TEDotProductAttention instead."
        )
        assert attention_bias is None, "Attention bias is not supported for DotProductAttention."

        # Token indexes for the rotary embedding over the flattened (s*b) dim.
        indexes = torch.arange(qkv.shape[0]*qkv.shape[1])
        kwargs = {"arg3":3, "indexes":indexes}
        if self.num_attention_heads_per_partition // self.num_query_groups_per_partition > 1:
            # GQA: k/v heads must be repeated to match query heads before the
            # packed kernel sees them; FusedRepeatKV does repeat + stack.
            # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]
            new_tensor_shape = qkv.size()[:-1] + (
                self.num_query_groups_per_partition,
                (
                    (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)
                    * self.hidden_size_per_attention_head
                ),
            )
            qkv = qkv.view(*new_tensor_shape)

            assert self.repeat_stack_func, ('repeat_stack_func is not available')
            qkv = self.repeat_stack_func(qkv,
                                self.num_attention_heads_per_partition,
                                self.num_query_groups_per_partition,
                                self.hidden_size_per_attention_head)
            if (qkv.shape[1] == 1):
                # b == 1: a reshape suffices (avoids the rearrange copy).
                new_shape = torch.Size((qkv.shape[0] * qkv.shape[1], 3, qkv.shape[3], qkv.shape[4]))
                qkv = qkv.reshape(new_shape)
            else:
                qkv = rearrange(qkv, "s b three h d -> (b s) three h d")
        else:
            # No GQA repetition needed: split hp directly into (3, heads, hn).
            if (qkv.shape[1] == 1):
                # [sq, b, hp] --> [sq, 3, ng, hn]
                new_tensor_shape = qkv.size()[:1] + (3, self.num_query_groups_per_partition, self.hidden_size_per_attention_head)
                qkv = qkv.view(*new_tensor_shape)
            else:
                # [sq, b, hp] --> [sq, b, 3, ng, hn]
                new_tensor_shape = qkv.size()[:-1] + (3, self.num_query_groups_per_partition, self.hidden_size_per_attention_head)
                qkv = qkv.view(*new_tensor_shape)
                qkv = rearrange(qkv, "s b three h d -> (b s) three h d")

        if self.args.position_embedding_type == 'rope':
            if not self.args.yd_alibi:
                qkv = self.rotary_emb_flash(qkv, **kwargs)

        # Fork the RNG tracker so attention dropout stays consistent across
        # tensor-parallel ranks (unnecessary under sequence parallelism).
        if not self.config.sequence_parallel:
            with tensor_parallel.get_cuda_rng_tracker().fork():
                context_layer = self.core_attention_flash(qkv)
        else:
            context_layer = self.core_attention_flash(qkv)
        context_layer = rearrange(context_layer, 'b s h d -> s b (h d)').contiguous()

        return context_layer #[s, b, h]
