# coding=utf-8
# Copyright 2024 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
from typing import Optional, Tuple

import torch
import torch.nn.functional as F

from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal
assert is_flash_attn_2_available()
assert is_flash_attn_greater_or_equal("2.6.0")

from transformers.modeling_flash_attention_utils import _upad_input, _get_unpad_data
from flash_attn.flash_attn_interface import _flash_attn_varlen_forward, _flash_attn_varlen_backward
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
assert "window_size" in list(inspect.signature(_flash_attn_varlen_forward).parameters)


@torch.jit.script
def _update_out_and_lse(
    out: torch.Tensor,
    lse: torch.Tensor,
    block_out: torch.Tensor,
    block_lse: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    block_out = block_out.to(torch.float32)
    block_lse = block_lse.transpose(-2, -1).unsqueeze(dim=-1)

    # new_lse = lse + torch.log(1 + torch.exp(block_lse - lse))
    # torch.exp(lse - new_lse) * out + torch.exp(block_lse - new_lse) * block_out
    # For additional context and discussion, please refer to:
    # https://github.com/zhuzilin/ring-flash-attention/pull/34#issuecomment-2076126795
    out = out - F.sigmoid(block_lse - lse) * (out - block_out)
    lse = lse - F.logsigmoid(lse - block_lse)

    return out, lse


def update_out_and_lse(
    out: Optional[torch.Tensor],
    lse: Optional[torch.Tensor],
    block_out: torch.Tensor,
    block_lse: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Accumulate one attention block into the running (out, lse) pair.

    On the first call (`out is None`) the accumulators are initialized from the
    block itself; afterwards the block is merged via `_update_out_and_lse`.
    """
    if out is not None:
        return _update_out_and_lse(out, lse, block_out, block_lse)
    # First block: seed the accumulators. Output is kept in float32 and the
    # LSE is moved to (total_q, nheads, 1) layout for broadcasting.
    return block_out.to(torch.float32), block_lse.transpose(-2, -1).unsqueeze(dim=-1)


class FlashDAttnVarlenFunc(torch.autograd.Function):
    """Variable-length "decomposed" flash attention over two key/value streams.

    forward() runs a causal T2T self-attention pass and a non-causal T2M
    cross-attention pass (both via `_flash_attn_varlen_forward`) and merges the
    two partial results with the numerically stable log-sum-exp combination in
    `update_out_and_lse`, so the result equals one softmax attention over the
    union of both key sets.  backward() replays each pass with
    `_flash_attn_varlen_backward` against the merged output/LSE.

    NOTE(review): the T2T/T2M naming (target-to-target / target-to-memory?) is
    inferred from the suffixes and the self/cross comments below — confirm
    against the call sites.
    """

    @staticmethod
    def forward(
        ctx,
        q_t2t, k_t2t, v_t2t,
        cu_seqlens_q_t2t, cu_seqlens_k_t2t,
        max_seqlen_q_t2t, max_seqlen_k_t2t,
        dropout_p_t2t, window_size_t2t,
        alibi_slopes_t2t, block_table_t2t,

        q_t2m, k_t2m, v_t2m,
        cu_seqlens_q_t2m, cu_seqlens_k_t2m,
        max_seqlen_q_t2m, max_seqlen_k_t2m,
        dropout_p_t2m, window_size_t2m,
        alibi_slopes_t2m, block_table_t2m,

        softcap, softmax_scale, deterministic, return_attn_probs,
    ):
        # Default scale is the standard 1/sqrt(head_dim); shared by both passes.
        if softmax_scale is None:
            softmax_scale = q_t2t.shape[-1] ** (-0.5)

        # Running accumulators for the merged output and log-sum-exp.
        out, lse = None, None

        # T2T Self-Attn (causal within each packed sequence).
        out_t2t, _, _, _, _, softmax_lse_t2t, _, rng_state_t2t = _flash_attn_varlen_forward(
            q_t2t,
            k_t2t,
            v_t2t,
            cu_seqlens_q_t2t,
            cu_seqlens_k_t2t,
            max_seqlen_q_t2t,
            max_seqlen_k_t2t,
            dropout_p_t2t,
            softmax_scale,
            causal=True,
            window_size=window_size_t2t,
            softcap=softcap,
            alibi_slopes=alibi_slopes_t2t,
            return_softmax=return_attn_probs and dropout_p_t2t > 0,
            block_table=block_table_t2t,
        )
        out, lse = update_out_and_lse(out, lse, out_t2t, softmax_lse_t2t)

        # T2M Cross-Attn (non-causal: every query may attend to every T2M key).
        out_t2m, _, _, _, _, softmax_lse_t2m, _, rng_state_t2m = _flash_attn_varlen_forward(
            q_t2m,
            k_t2m,
            v_t2m,
            cu_seqlens_q_t2m,
            cu_seqlens_k_t2m,
            max_seqlen_q_t2m,
            max_seqlen_k_t2m,
            dropout_p_t2m,
            softmax_scale,
            causal=False,
            window_size=window_size_t2m,
            softcap=softcap,
            alibi_slopes=alibi_slopes_t2m,
            return_softmax=return_attn_probs and dropout_p_t2m > 0,
            block_table=block_table_t2m,
        )
        out, lse = update_out_and_lse(out, lse, out_t2m, softmax_lse_t2m)

        # Accumulation ran in float32; cast back to the input dtype, and bring
        # lse from (total_q, nheads, 1) back to flash-attn's (nheads, total_q).
        out = out.to(q_t2t.dtype)
        lse = lse.squeeze(dim=-1).transpose(0, 1)

        # Save everything both backward passes need (including RNG states so
        # dropout masks can be reproduced exactly).
        ctx.save_for_backward(
            out, lse,
            q_t2t, k_t2t, v_t2t, cu_seqlens_q_t2t, cu_seqlens_k_t2t, rng_state_t2t,
            q_t2m, k_t2m, v_t2m, cu_seqlens_q_t2m, cu_seqlens_k_t2m, rng_state_t2m
        )
        # Non-tensor configuration for the T2T backward pass.
        ctx.dropout_p_t2t = dropout_p_t2t
        ctx.max_seqlen_q_t2t = max_seqlen_q_t2t
        ctx.max_seqlen_k_t2t = max_seqlen_k_t2t
        ctx.causal_t2t = True
        ctx.window_size_t2t = window_size_t2t
        ctx.alibi_slopes_t2t = alibi_slopes_t2t

        # Non-tensor configuration for the T2M backward pass.
        ctx.dropout_p_t2m = dropout_p_t2m
        ctx.max_seqlen_q_t2m = max_seqlen_q_t2m
        ctx.max_seqlen_k_t2m = max_seqlen_k_t2m
        ctx.causal_t2m = False
        ctx.window_size_t2m = window_size_t2m
        ctx.alibi_slopes_t2m = alibi_slopes_t2m

        # Shared settings.
        ctx.softcap = softcap
        ctx.softmax_scale = softmax_scale
        ctx.deterministic = deterministic

        # Per-block attention probabilities are not aggregated across the two
        # passes, hence the trailing None when return_attn_probs is requested.
        return out if not return_attn_probs else (out, lse, None)

    @staticmethod
    def backward(ctx, dout, *args):
        """Backprop through both passes; one gradient slot per forward input."""
        out, lse, \
        q_t2t, k_t2t, v_t2t, cu_seqlens_q_t2t, cu_seqlens_k_t2t, rng_state_t2t, \
        q_t2m, k_t2m, v_t2m, cu_seqlens_q_t2m, cu_seqlens_k_t2m, rng_state_t2m \
            = ctx.saved_tensors

        # T2T backward: uses the *merged* out/lse, which is how each pass's
        # gradient contribution is rescaled to the combined softmax.
        dq_t2t, dk_t2t, dv_t2t = torch.empty_like(q_t2t), torch.empty_like(k_t2t), torch.empty_like(v_t2t)
        _flash_attn_varlen_backward(
            dout,
            q_t2t,
            k_t2t,
            v_t2t,
            out,
            lse,
            dq_t2t,
            dk_t2t,
            dv_t2t,
            cu_seqlens_q_t2t,
            cu_seqlens_k_t2t,
            ctx.max_seqlen_q_t2t,
            ctx.max_seqlen_k_t2t,
            ctx.dropout_p_t2t,
            ctx.softmax_scale,
            ctx.causal_t2t,
            ctx.window_size_t2t,
            ctx.softcap,
            ctx.alibi_slopes_t2t,
            ctx.deterministic,
            rng_state=rng_state_t2t,
        )
        dq_t2t = dq_t2t[..., : dout.shape[-1]]  # We could have padded the head dimension
        dk_t2t = dk_t2t[..., : dout.shape[-1]]
        dv_t2t = dv_t2t[..., : dout.shape[-1]]

        if ctx.max_seqlen_k_t2m == 0:
            # No T2M keys at all: that pass contributed nothing to the forward
            # output, so its gradients are exactly zero (and the kernel would
            # not accept an empty sequence anyway).
            dq_t2m, dk_t2m, dv_t2m = torch.zeros_like(q_t2m), torch.zeros_like(k_t2m), torch.zeros_like(v_t2m)
        else:
            dq_t2m, dk_t2m, dv_t2m = torch.empty_like(q_t2m), torch.empty_like(k_t2m), torch.empty_like(v_t2m)
            _flash_attn_varlen_backward(
                dout,
                q_t2m,
                k_t2m,
                v_t2m,
                out,
                lse,
                dq_t2m,
                dk_t2m,
                dv_t2m,
                cu_seqlens_q_t2m,
                cu_seqlens_k_t2m,
                ctx.max_seqlen_q_t2m,
                ctx.max_seqlen_k_t2m,
                ctx.dropout_p_t2m,
                ctx.softmax_scale,
                ctx.causal_t2m,
                ctx.window_size_t2m,
                ctx.softcap,
                ctx.alibi_slopes_t2m,
                ctx.deterministic,
                rng_state=rng_state_t2m,
            )
            dq_t2m = dq_t2m[..., : dout.shape[-1]]  # We could have padded the head dimension
            dk_t2m = dk_t2m[..., : dout.shape[-1]]
            dv_t2m = dv_t2m[..., : dout.shape[-1]]

        # 26 slots, mirroring forward()'s inputs after ctx: tensor grads for
        # q/k/v of each pass, None for every non-differentiable argument.
        return dq_t2t, dk_t2t, dv_t2t, None, None, None, None, None, None, None, None, \
            dq_t2m, dk_t2m, dv_t2m, None, None, None, None, None, None, None, None, \
            None, None, None, None


def flash_dattn_varlen_func(
    q_t2t=None, k_t2t=None, v_t2t=None,
    cu_seqlens_q_t2t=None, cu_seqlens_k_t2t=None,
    max_seqlen_q_t2t=None, max_seqlen_k_t2t=None,
    dropout_p_t2t=0.0, window_size_t2t=(-1, -1),
    alibi_slopes_t2t=None, block_table_t2t=None,

    q_t2m=None, k_t2m=None, v_t2m=None,
    cu_seqlens_q_t2m=None, cu_seqlens_k_t2m=None,
    max_seqlen_q_t2m=None, max_seqlen_k_t2m=None,
    dropout_p_t2m=0.0, window_size_t2m=(-1, -1),
    alibi_slopes_t2m=None, block_table_t2m=None,

    softcap=0.0, softmax_scale=None, deterministic=False, return_attn_probs=False,
):
    """Keyword-friendly wrapper around ``FlashDAttnVarlenFunc.apply``.

    ``torch.autograd.Function.apply`` accepts positional arguments only, so
    this wrapper lets callers use keywords and defaults. The tuples below must
    stay in sync with the parameter order of ``FlashDAttnVarlenFunc.forward``.
    """
    t2t_args = (
        q_t2t, k_t2t, v_t2t,
        cu_seqlens_q_t2t, cu_seqlens_k_t2t,
        max_seqlen_q_t2t, max_seqlen_k_t2t,
        dropout_p_t2t, window_size_t2t,
        alibi_slopes_t2t, block_table_t2t,
    )
    t2m_args = (
        q_t2m, k_t2m, v_t2m,
        cu_seqlens_q_t2m, cu_seqlens_k_t2m,
        max_seqlen_q_t2m, max_seqlen_k_t2m,
        dropout_p_t2m, window_size_t2m,
        alibi_slopes_t2m, block_table_t2m,
    )
    shared_args = (softcap, softmax_scale, deterministic, return_attn_probs)
    return FlashDAttnVarlenFunc.apply(*t2t_args, *t2m_args, *shared_args)


# The T2T (self-attention) unpadding is exactly HF transformers' standard
# flash-attention unpad helper; alias it for naming symmetry with
# `_upad_input_t2m`, which handles separate query and key/value masks.
_upad_input_t2t = _upad_input


def _upad_input_t2m(
    query_layer: torch.Tensor,
    key_layer: torch.Tensor,
    value_layer: torch.Tensor,
    q_attention_mask: torch.Tensor,
    kv_attention_mask: torch.Tensor,
    query_length: int,
):
    """
    Unpads query, key, and values tensors, using a single dimension for all tokens even though they belong to different batches.

    Unlike `_upad_input`, the query and the key/value streams here carry *different*
    attention masks (cross-attention), so each side is unpadded with its own mask.

    This function is used instead of `flash_attn.bert_padding.unpad_input` in order to avoid the recomputation of the same intermediary
    tensors for query, key, value tensors.

    Arguments:
        query_layer (`torch.Tensor`):
            Query state with padding. Shape: (batch_size, query_length, num_heads, head_dim).
        key_layer (`torch.Tensor`):
            Key state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        value_layer (`torch.Tensor`):
            Value state with padding. Shape: (batch_size, kv_seq_len, num_key_value_heads, head_dim).
        q_attention_mask (`torch.Tensor`):
            Attention mask for query state. Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.
        kv_attention_mask (`torch.Tensor`):
            Attention mask for key and value states. Boolean or int tensor of shape (batch_size, sequence_length), 1 means valid and 0 means not valid.
        query_length (`int`):
            Target length.
    Return:
        query_layer (`torch.Tensor`):
            Query state without padding. Shape: (total_target_length, num_heads, head_dim).
        key_layer (`torch.Tensor`):
            Key state without padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        value_layer (`torch.Tensor`):
            Value state without padding. Shape: (total_source_length, num_key_value_heads, head_dim).
        indices_q (`torch.Tensor`):
            The indices of non-masked tokens from the flattened input target sequence.
        (cu_seqlens_q, cu_seqlens_k) (`Tuple[int]`):
            The cumulative sequence lengths for the target (query) and source (key, value), used to index into ragged (unpadded) tensors. `cu_seqlens` shape is (batch_size + 1,).
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k) (`Tuple[int]`):
            Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value).
    """
    # Key/value side: compute unpad indices once and reuse for both tensors.
    indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(kv_attention_mask)
    batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

    key_layer = index_first_axis(
        key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
    )
    value_layer = index_first_axis(
        value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
    )

    if query_length == 1:
        # Single-token query (decode step): every batch element keeps exactly
        # one query token, so the cumulative lengths are simply 0..batch_size.
        max_seqlen_in_batch_q = 1
        cu_seqlens_q = torch.arange(
            batch_size + 1, dtype=torch.int32, device=query_layer.device
        )  # There is a memcpy here, that is very bad.
        indices_q = cu_seqlens_q[:-1]
        query_layer = query_layer.squeeze(1)
    else:
        # General case: unpad the query with its own mask.
        query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, q_attention_mask)

    return (
        query_layer,
        key_layer,
        value_layer,
        indices_q,
        (cu_seqlens_q, cu_seqlens_k),
        (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
    )


def flash_decomposed_attention_forward(
    q_t2t: torch.Tensor,
    k_t2t: torch.Tensor,
    v_t2t: torch.Tensor,
    attention_mask_t: torch.Tensor,

    q_t2m: torch.Tensor,
    k_t2m: torch.Tensor,
    v_t2m: torch.Tensor,
    attention_mask_m: torch.Tensor,

    dropout_t2t: float = 0.0,
    sliding_window_t2t: Optional[int] = None,
    dropout_t2m: float = 0.0,
    sliding_window_t2m: Optional[int] = None,
    softmax_scale: Optional[float] = None,
    softcap: Optional[float] = None,
    deterministic: Optional[bool] = None,
):
    """Driver for decomposed (causal T2T self + non-causal T2M cross) flash attention.

    Unpads the two query/key/value streams according to their attention masks,
    runs `flash_dattn_varlen_func` on the packed sequences, and re-pads the
    merged output back to the batch layout.

    Args:
        q_t2t, k_t2t, v_t2t: padded states for the T2T pass; assumed 4D with
            the sequence length on dim 1 — TODO confirm (batch, seq, heads, head_dim).
        attention_mask_t: 2D padding mask for the target tokens (1 = valid).
        q_t2m, k_t2m, v_t2m: padded states for the T2M pass (same layout assumption).
        attention_mask_m: 2D padding mask for the T2M key/value tokens (1 = valid).
        dropout_t2t, dropout_t2m: attention dropout probability for each pass.
        sliding_window_t2t, sliding_window_t2m: optional sliding-window size for
            each pass; only applied when the source sequence exceeds the window.
        softmax_scale: softmax temperature; flash-attn's 1/sqrt(head_dim)
            default is used when None.
        softcap: optional logit soft cap, forwarded only when set.
        deterministic: force a deterministic backward pass; when None, read
            from the FLASH_ATTENTION_DETERMINISTIC environment variable.

    Returns:
        Attention output re-padded to (batch_size, query_length, ...).

    Raises:
        NotImplementedError: if both masks are None (the fully-unpadded fast
            path is intentionally not implemented here).
        ValueError: if exactly one of the two masks is None.
    """
    flash_kwargs = {}

    # Sliding windows are only meaningful when the source is longer than the window.
    # Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
    if sliding_window_t2t is not None and k_t2t.shape[1] > sliding_window_t2t:
        flash_kwargs["window_size_t2t"] = (sliding_window_t2t, sliding_window_t2t)
    if sliding_window_t2m is not None and k_t2m.shape[1] > sliding_window_t2m:
        flash_kwargs["window_size_t2m"] = (sliding_window_t2m, sliding_window_t2m)

    if deterministic is None:
        deterministic = os.environ.get("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
    flash_kwargs["deterministic"] = deterministic

    # Forward optional knobs only when set so the downstream defaults apply.
    if softcap is not None:
        flash_kwargs["softcap"] = softcap
    if softmax_scale is not None:
        flash_kwargs["softmax_scale"] = softmax_scale

    # Contains at least one padding token in the sequence
    if attention_mask_t is not None and attention_mask_m is not None:
        batch_size, query_length = q_t2t.shape[:2]

        # Unpad the self-attention stream (shared mask for q and k/v).
        q_t2t, k_t2t, v_t2t, indices_q, cu_seq_lens_t2t, max_seq_lens_t2t = _upad_input_t2t(
            q_t2t, k_t2t, v_t2t, attention_mask_t, query_length
        )
        cu_seqlens_q_t2t, cu_seqlens_k_t2t = cu_seq_lens_t2t
        max_seqlen_q_t2t, max_seqlen_k_t2t = max_seq_lens_t2t

        # Unpad the cross-attention stream (query mask differs from k/v mask).
        # indices_q is shared with the T2T stream since queries are the same tokens.
        q_t2m, k_t2m, v_t2m, _, cu_seq_lens_t2m, max_seq_lens_t2m = _upad_input_t2m(
            q_t2m, k_t2m, v_t2m, attention_mask_t, attention_mask_m, query_length
        )
        cu_seqlens_q_t2m, cu_seqlens_k_t2m = cu_seq_lens_t2m
        max_seqlen_q_t2m, max_seqlen_k_t2m = max_seq_lens_t2m

        attn_output_unpad = flash_dattn_varlen_func(
            q_t2t=q_t2t, k_t2t=k_t2t, v_t2t=v_t2t,
            cu_seqlens_q_t2t=cu_seqlens_q_t2t, cu_seqlens_k_t2t=cu_seqlens_k_t2t,
            max_seqlen_q_t2t=max_seqlen_q_t2t, max_seqlen_k_t2t=max_seqlen_k_t2t,
            dropout_p_t2t=dropout_t2t,

            q_t2m=q_t2m, k_t2m=k_t2m, v_t2m=v_t2m,
            cu_seqlens_q_t2m=cu_seqlens_q_t2m, cu_seqlens_k_t2m=cu_seqlens_k_t2m,
            max_seqlen_q_t2m=max_seqlen_q_t2m, max_seqlen_k_t2m=max_seqlen_k_t2m,
            dropout_p_t2m=dropout_t2m,

            **flash_kwargs
        )

        attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
    elif attention_mask_t is None and attention_mask_m is None:
        # The no-padding fast path (plain flash_attn_func) is not wired up here.
        raise NotImplementedError("Both attention_mask_t and attention_mask_m should not be None.")
    else:
        raise ValueError("attention_mask_t and attention_mask_m should both not be None or both be None.")

    return attn_output