from typing import Tuple, Optional

import torch
from torch import nn
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaAttention, apply_rotary_pos_emb, eager_attention_forward, \
    LlamaRotaryEmbedding, repeat_kv, eager_attention_forward, logger
from transformers.processing_utils import Unpack
from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS

__all__ = ['SparseAttention', 'modify_attention', 'CacheShape']

def sparse_eager_attention_forward(
        module: nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        scaling: float,
        past_key_value: Cache,
        dropout: float = 0.0,
        **kwargs,
):
    """Eager attention that may additionally prune this layer's KV cache in place.

    Computes standard scaled-dot-product attention, then — depending on
    ``module.mode`` — shrinks ``past_key_value``'s key/value caches at
    ``module.layer_idx`` so later decoding steps attend to fewer tokens:

    - ``"full"``: dense baseline, no pruning.
    - ``"stride"``: keep only every ``module.stride_step``-th cached token.
    - otherwise (e.g. ``"heavy"``): H2O-style eviction keeping a recent
      window of ``module.recent_budget`` tokens plus (in ``"heavy"`` mode)
      the ``module.heavy_budget`` keys with the highest accumulated
      attention mass.

    Args:
        module: owning attention module; its mode/budget/accumulation
            attributes are read and mutated.
        query: query states; attention weights come out as
            (bsz, num_heads, q_len, kv_seq_len) per the matmul below.
        key: cached key states before ``repeat_kv`` expansion.
        value: cached value states before ``repeat_kv`` expansion.
        attention_mask: optional additive causal mask, sliced to the key length.
        scaling: score scale factor (the module passes ``head_dim ** -0.5``).
        past_key_value: cache whose per-layer tensors are pruned in place.
        dropout: attention-probability dropout rate.

    Returns:
        ``(attn_output, attn_weights)`` — output is transposed to
        (bsz, q_len, num_heads, head_dim) layout and made contiguous.
    """
    # Expand grouped KV heads so key/value match the query head count.
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    # (bsz, self.num_heads, q_len, kv_seq_len)
    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    # Softmax in float32 for numerical stability, then cast back.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    if module.mode == "full":
        # Dense baseline: return without touching the cache.
        return attn_output, attn_weights

    if module.mode == "stride":
        # step_count tracks position within the stride cycle across calls.
        module.step_count += attn_weights.shape[2]
        module.step_count %= module.stride_step
        if attn_weights.shape[2] != 1:
            # prefill: build a (heads, kv_len) keep-mask selecting every
            # stride_step-th position, then gather the cache through it.
            tmp_mask = torch.zeros(attn_weights.shape[1], attn_weights.shape[3], dtype=torch.bool) \
                .to(attn_weights.device)
            tmp_mask[:, ::module.stride_step] = 1
            bsz, heads, length, hidden_dim = past_key_value.key_cache[module.layer_idx].shape
            tmp_mask = tmp_mask.unsqueeze(0).unsqueeze(3).expand(bsz, heads, length, hidden_dim)
            past_key_value.key_cache[module.layer_idx] = past_key_value.key_cache[module.layer_idx][tmp_mask] \
                .view(bsz, heads, -1, hidden_dim)
            past_key_value.value_cache[module.layer_idx] = past_key_value.value_cache[module.layer_idx][tmp_mask] \
                .view(bsz, heads, -1, hidden_dim)
        else:
            # decode: drop the token just appended unless this step lands on
            # the stride boundary (step_count == 1).
            if module.step_count != 1:
                past_key_value.key_cache[module.layer_idx] = past_key_value.key_cache[module.layer_idx][:, :, :-1, :]
                past_key_value.value_cache[module.layer_idx] = past_key_value.value_cache[module.layer_idx][:, :, :-1,
                                                               :]

        return attn_output, attn_weights

    # Heavy-hitter path. Per-key attention mass summed over batch (dim 0)
    # and query positions (dim 1 after the first sum) -> (heads, k-tokens).
    accumulation = attn_weights.sum(0).sum(1)

    if module.previous_accumulation is not None:
        # Decode step: the cache grew by one key, so fold the running totals
        # into all positions except the newest one.
        accumulation[:, :-1] += module.previous_accumulation
    else:
        # First (prefill) call: derive absolute budgets from the prompt length.
        module.heavy_budget = int(module.heavy_ratio * accumulation.shape[-1])
        module.recent_budget = int(module.recent_ratio * accumulation.shape[-1])
        module.total_budget = module.heavy_budget + module.recent_budget

    module.previous_accumulation = accumulation
    dtype_attention_weight = attn_weights.dtype  # NOTE(review): currently unused
    device_attention_weight = attn_weights.device

    if accumulation.shape[-1] > module.total_budget:
        # (heads, kv_len) boolean keep-mask over cached keys.
        tmp_mask = (torch.ones(accumulation.shape[0], accumulation.shape[1], dtype=torch.bool)
                    .to(device_attention_weight))
        if module.recent_budget != 0:
            # Always keep the trailing recent window; eviction candidates are
            # everything before it.
            tmp_mask[:, :-module.recent_budget] = 0
            selected_set = accumulation[:, :-module.recent_budget]
        else:
            tmp_mask[:, :] = 0
            selected_set = accumulation

        if module.heavy_budget != 0 and module.mode == "heavy":
            # Additionally keep the heavy_budget highest-scoring keys outside
            # the recent window (per head).
            _, top_k = selected_set.topk(module.heavy_budget)
            tmp_mask = tmp_mask.scatter(-1, top_k, 1)

        # last "recent_budget - 1" tokens and heavy_budget tokens before recent window are reserved
        # NOTE(review): the views below assume exactly one token is evicted per
        # call (kv_len == total_budget + 1, i.e. one new token per decode step
        # after budgets were fixed) — confirm this holds for every mode that
        # reaches this branch.
        bsz, heads, kv_len, hidden_dim = past_key_value.key_cache[module.layer_idx].shape
        module.previous_accumulation = module.previous_accumulation[tmp_mask].view(heads, kv_len - 1)
        tmp_mask = tmp_mask.unsqueeze(0).unsqueeze(3).expand(bsz, heads, kv_len, hidden_dim)
        past_key_value.key_cache[module.layer_idx] = past_key_value.key_cache[module.layer_idx][tmp_mask] \
            .view(bsz, heads, kv_len - 1, hidden_dim)
        past_key_value.value_cache[module.layer_idx] = past_key_value.value_cache[module.layer_idx][tmp_mask] \
            .view(bsz, heads, kv_len - 1, hidden_dim)

    return attn_output, attn_weights


class SparseAttention(nn.Module):
    """Drop-in replacement for ``LlamaAttention`` with KV-cache pruning.

    The q/k/v/o projections and rotary-embedding handling mirror the
    upstream ``LlamaAttention``; the attention computation is delegated to
    ``sparse_eager_attention_forward``, which may shrink this layer's KV
    cache in place according to ``mode``.

    Args:
        config: Llama model configuration.
        layer_idx: index of this layer in the decoder stack; used to address
            the right entry of the cache.
        heavy_ratio: fraction of the prompt length kept as heavy-hitter
            tokens (``"heavy"`` mode).
        recent_ratio: fraction of the prompt length kept as the recent window.
        stride_step: keep every ``stride_step``-th token (``"stride"`` mode).
        mode: one of ``"full"``, ``"stride"`` or ``"heavy"`` — see
            ``sparse_eager_attention_forward``.
    """

    def __init__(self, config: LlamaConfig, layer_idx: int, heavy_ratio=0, recent_ratio=0, stride_step=1, mode="full"):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim ** -0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

        # Mutable pruning state shared with sparse_eager_attention_forward.
        self.attention_mask_next = None
        self.previous_accumulation = None  # running per-key attention mass
        self.recent_ratio = recent_ratio
        self.heavy_ratio = heavy_ratio
        self.stride_step = stride_step
        self.step_count = 0  # position within the stride cycle
        self.mode = mode
        # Absolute budgets; filled in on the first (prefill) forward pass.
        self.recent_budget = None
        self.heavy_budget = None
        self.total_budget = None

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_embeddings: Tuple[torch.Tensor, torch.Tensor],
            attention_mask: Optional[torch.Tensor],
            past_key_value: Optional[Cache] = None,
            cache_position: Optional[torch.LongTensor] = None,
            **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Project, apply RoPE, update the cache, then run sparse attention.

        Returns:
            ``(attn_output, attn_weights)`` with ``attn_output`` of shape
            (bsz, q_len, hidden_size).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # Append the new keys/values; the sparse kernel below may prune
            # the cache again after attending.
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)

        # Fix: the original annotated this as `Callable`, a name never
        # imported in this module (undefined-name lint error).
        attention_interface = sparse_eager_attention_forward

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            past_key_value=past_key_value,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


def modify_attention(model, heavy_ratio=0.0, recent_ratio=0.0, stride_step=1, mode="full"):
    """Recursively replace every ``LlamaAttention`` in ``model`` with ``SparseAttention``.

    Args:
        model: module (tree) to rewrite in place.
        heavy_ratio: forwarded to ``SparseAttention``.
        recent_ratio: forwarded to ``SparseAttention``.
        stride_step: forwarded to ``SparseAttention``.
        mode: forwarded to ``SparseAttention``.

    Returns:
        The same ``model`` object with its attention modules swapped.
    """
    for name, module in reversed(model._modules.items()):

        # Recurse first so attention layers nested inside containers are handled.
        if len(list(module.children())) > 0:
            model._modules[name] = modify_attention(module, heavy_ratio, recent_ratio, stride_step, mode)

        if isinstance(module, LlamaAttention):
            sparse = SparseAttention(module.config, module.layer_idx, heavy_ratio, recent_ratio,
                                     stride_step, mode)
            # Bug fix: the original installed a freshly initialized
            # SparseAttention, silently discarding the pretrained q/k/v/o
            # projection weights. Copy them over (submodule names match) and
            # keep the source module's device/dtype.
            sparse.load_state_dict(module.state_dict(), strict=False)
            param = next(module.parameters(), None)
            if param is not None:
                sparse = sparse.to(device=param.device, dtype=param.dtype)
            model._modules[name] = sparse

    return model