import torch
from torch import nn

import math

from einops import rearrange, repeat, reduce

from .utils import FourierEmbed

import taichi as ti

ti.init(arch=ti.gpu)

# NOTE: 从已有位置导入这个函数
def pad_batched_tokens(tokens, batch_inds, num_heads):
    """Pad flat N x C tokens into a sequence-first batch for nn.MultiheadAttention.

    Args:
        tokens (Tensor, N x C): token features for all batches, concatenated.
        batch_inds (list[torch.Tensor]): per-batch index tensors into `tokens`.
        num_heads (int): number of attention heads (the mask is tiled per head).

    Returns:
        batched_tokens_padded (Tensor, max_len x B x C): zero-padded tokens,
            sequence-first as nn.MultiheadAttention expects.
        batched_token_masks (BoolTensor, (B * num_heads) x max_len x max_len):
            attention masks; True marks padded positions that must be ignored.
    """
    C = tokens.shape[-1]
    device = tokens.device
    # gather per-batch tokens and their lengths
    batched_tokens = [tokens[inds] for inds in batch_inds]
    batched_token_cnts = [len(inds) for inds in batch_inds]
    # pad every batch to the longest sequence
    max_len = max(batched_token_cnts)
    batched_tokens_padded = []
    batched_token_masks = []
    for t, cnt in zip(batched_tokens, batched_token_cnts):
        # pad with zeros of the SAME dtype as tokens (a float32 default would
        # make torch.cat fail for e.g. half-precision inputs)
        pad = torch.zeros(max_len - cnt, C, device=device, dtype=tokens.dtype)
        t = torch.cat([t, pad], dim=0)
        mask = torch.full([max_len, max_len], True, device=device, dtype=torch.bool)
        mask[:cnt, :cnt] = False  # attend only among the real (unpadded) tokens
        batched_tokens_padded.append(t)
        batched_token_masks.append(mask)
    batched_tokens_padded = torch.stack(batched_tokens_padded, dim=1)  # [max_len x B x C]
    batched_token_masks = torch.stack(batched_token_masks, dim=0)      # [B x max_len x max_len]
    # tile each batch mask once per head, equivalent to
    # einops.repeat(mask, "b i j -> (b h) i j", h=num_heads)
    batched_token_masks = batched_token_masks.repeat_interleave(num_heads, dim=0)
    return batched_tokens_padded.contiguous(), batched_token_masks.contiguous()



# NOTE: 1. 建议对齐参数缩进位置
#       2. 建议整理一下注释，用 tab 对齐位置 / 明确含义 / 删去非必要部分
#       3. 变量名中尽量不要大写（常量除外）
#       4. weight_T 的权重并不总要计算，但要怎么改可能需要测一下具体开销
#       5. eps 可以去掉，让出现分母为零的问题时可以更快定义到 bug
#       6. 参数 sim_X 实际上是归一化后的权重，变量名应为 weight_x，weight_X 则应为 sum_x
# Compute association weights between linked (token, super_token) pairs.
#
# For each pair i = (token, super_token) in linked_pairs:
#   sim[i]    += <t[token], st[super_token]>, then sim[i] = exp(sim[i] * sim_scale)
#   weight_S[super_token] += sim[i]   (per-super-token normalizer)
#   weight_T[token]       += sim[i]   (per-token normalizer)
#   sim_S[i] = sim[i] / (weight_S[super_token] + eps)  -> weights normalized within each super token
#   sim_T[i] = sim[i] / (weight_T[token] + eps)        -> weights normalized within each token
#
# NOTE(review): sim accumulates with += and is never cleared here — the caller
# must zero sim, weight_S and weight_T before every invocation. eps (typically
# 1e-7) guards against a zero denominator. The += on shared arrays presumably
# relies on taichi's atomic demotion inside the parallelized outer loops —
# confirm against the taichi docs for the target backend.
@ti.kernel
def softmax_Q(t: ti.types.ndarray(),   
                                    st: ti.types.ndarray(),    
                                    linked_pairs: ti.types.ndarray(),  
                                    sim: ti.types.ndarray(), 
                                    sim_S: ti.types.ndarray(),   
                                    sim_T: ti.types.ndarray(),   
                                    weight_S: ti.types.ndarray(),
                                    weight_T:ti.types.ndarray(),
                                    eps: ti.float32,
                                    sim_scale:ti.float32): # eps is typically 1e-7
    for i in range(linked_pairs.shape[0]):
        # dot product of a token feature with its linked super token feature
        for j in range(t.shape[1]):
            sim[i] += t[linked_pairs[i, 0], j] * st[linked_pairs[i, 1], j]  
        sim[i] = ti.exp(sim[i]*sim_scale)      # scaled-exp similarity, shape [N]
        weight_S[linked_pairs[i,1]]+=sim[i]    # sum over tokens linked to this super token, [S]
        weight_T[linked_pairs[i,0]]+=sim[i]    # sum over super tokens linked to this token, [T]
    for i in range(linked_pairs.shape[0]):            
        sim_S[i] = sim[i]/(weight_S[linked_pairs[i, 1]] + eps)  # [N], normalized per super token
        sim_T[i] = sim[i]/(weight_T[linked_pairs[i, 0]] + eps)  # [N], normalized per token


# NOTE: 1. 逗号后面加空格
#       2. 参数 sim 实际上是归一化后的权重，变量名应为 weights
#       3. 这里的函数名可以更精炼一点，iter 和 upsample 的 kernel 重复了，比如可以根据实际上
#          干了的事情命名为 weighted_dual_index_add (使用两个 index 的加权求和 add)
#       4. 更进一步地，这些函数可以一个表示相加方向的参数合并为一个（但也需要注意性能开销是否会变大很多）
# Weighted scatter-add of token features into super tokens:
#   dst[pairs[i, 1], :] += src[pairs[i, 0], :] * sim[i]  for every linked pair.
# The caller must zero `dst` beforehand; the += presumably relies on taichi's
# atomic demotion in the parallelized ti.ndrange loop — confirm for the backend.
@ti.kernel
def iter_st_forward(src: ti.types.ndarray(),           
                               dst: ti.types.ndarray(),
                               linked_pairs: ti.types.ndarray(),
                               sim: ti.types.ndarray()):
    for i,j in ti.ndrange(linked_pairs.shape[0],src.shape[1]):
        dst[linked_pairs[i, 1], j] += src[linked_pairs[i, 0], j] * sim[i]   # token * weight -> super token, [S, C]

# Backward of iter_st_forward: route gradients from super tokens back to tokens,
#   g_src[pairs[i, 0], :] += g_dst[pairs[i, 1], :] * sim[i].
# The caller must zero `g_src` beforehand.
@ti.kernel
def iter_st_backward(g_src: ti.types.ndarray(),
                                g_dst: ti.types.ndarray(),
                                linked_pairs: ti.types.ndarray(),
                                sim: ti.types.ndarray()):
    for i,j in ti.ndrange(linked_pairs.shape[0],g_src.shape[1]):
        g_src[linked_pairs[i, 0], j] += g_dst[linked_pairs[i, 1], j] * sim[i]

# Weighted scatter-add of super token features back to tokens (upsampling):
#   dst[pairs[i, 0], :] += src[pairs[i, 1], :] * sim[i]  for every linked pair.
# The caller must zero `dst` beforehand; note the pair indices are swapped
# relative to iter_st_forward.
@ti.kernel
def upsample_t_forward(src: ti.types.ndarray(),
                               dst: ti.types.ndarray(),
                               linked_pairs: ti.types.ndarray(),
                               sim: ti.types.ndarray()
                               ):
    for i,j in ti.ndrange(linked_pairs.shape[0], src.shape[1]):
        dst[linked_pairs[i, 0], j] += src[linked_pairs[i, 1], j] * sim[i]    # super token * weight -> token, [T, C]
 
# Backward of upsample_t_forward: route gradients from tokens back to super tokens,
#   g_src[pairs[i, 1], :] += g_dst[pairs[i, 0], :] * sim[i].
# The caller must zero `g_src` beforehand.
@ti.kernel
def upsample_t_backward(g_src: ti.types.ndarray(),
                                g_dst: ti.types.ndarray(),
                                linked_pairs: ti.types.ndarray(),
                                sim: ti.types.ndarray()):
    for i,j in ti.ndrange(linked_pairs.shape[0], g_src.shape[1]):
        g_src[linked_pairs[i, 1], j] += g_dst[linked_pairs[i, 0], j] * sim[i]



# NOTE: 1. 类名应使用驼峰命名法
#       2. 该类的命名、参数名可以参考上述的建议
class iter_st_or_upsample_t(torch.autograd.Function):
    """Autograd wrapper around the weighted dual-index scatter-add kernels.

    forward computes, for every linked pair i:
        dst[pairs[i, 1]] += src[pairs[i, 0]] * sim[i]    (swap_index=False: tokens -> super tokens)
        dst[pairs[i, 0]] += src[pairs[i, 1]] * sim[i]    (swap_index=True:  super tokens -> tokens)

    Gradients flow to `src` only; `linked_pairs` and `sim` are treated as
    constants. Typical usage:
        st = iter_st_or_upsample_t.apply(tokens, st, linked_pairs, sim)
        tokens = iter_st_or_upsample_t.apply(st, tokens, linked_pairs, sim, True)
    """

    @staticmethod
    def forward(ctx, src, dst, linked_pairs, sim, swap_index=False):
        ctx.swap_index = swap_index
        ctx.shape = src.shape
        # dst is a caller-provided output buffer; clear it here because the
        # kernels accumulate with += (also guards against a caller forgetting
        # to zero it). NOTE(review): this in-place use of an input tensor is
        # fragile for autograd; consider allocating a fresh zero tensor from a
        # shape argument instead.
        dst.zero_()
        if not swap_index:
            iter_st_forward(src, dst, linked_pairs, sim)    # aggregate: dst is the super tokens
        else:
            upsample_t_forward(src, dst, linked_pairs, sim) # upsample: dst is the tokens
        ti.sync()
        ctx.save_for_backward(linked_pairs, sim)
        return dst

    @staticmethod
    def backward(ctx, grad_output):
        # BUG FIX: tensors stored with ctx.save_for_backward are retrieved via
        # ctx.saved_tensors; ctx.save_variables does not exist and raised
        # AttributeError on every backward pass.
        linked_pairs, sim = ctx.saved_tensors
        # match grad_output's dtype so the kernel operates on consistent buffers
        g_src = torch.zeros(ctx.shape, device=grad_output.device, dtype=grad_output.dtype)
        if not ctx.swap_index:
            iter_st_backward(g_src, grad_output, linked_pairs, sim)
        else:
            upsample_t_backward(g_src, grad_output, linked_pairs, sim)
        ti.sync()
        # one gradient per forward input: src, dst, linked_pairs, sim, swap_index
        return (g_src, None, None, None, None)


# NOTE: 主要类名建议和文件名对应
class SparseSuperTokenAttention_taichi(nn.Module):
    """Sparse super-token attention with taichi-accelerated aggregation.

    Tokens are softly assigned to super tokens via iteratively refined
    association weights, global multi-head attention runs over the (much
    smaller) set of super tokens, and the result is upsampled back to tokens
    with a residual connection and LayerNorm.
    """

    def __init__(
        self,
        ch: int,
        num_heads: int,
        num_super_token_iters: int = 1,
        pe_temperature: float = 0.1,
        dropout: float = 0.1,
        dim: int = 3,
        use_same_association: bool = True,
    ) -> None:
        """
        Args:
            ch: feature channel count.
            num_heads: number of attention heads.
            num_super_token_iters: association-refinement iterations.
            pe_temperature: temperature of the Fourier positional embedding.
            dropout: attention dropout probability.
            dim: dimensionality of token positions.
            use_same_association: reuse the token->super-token weights for
                upsampling (rescaled by S/T) instead of the per-token weights.
        """
        super().__init__()
        # constants
        self.use_same_association = use_same_association
        self.dim = dim
        self.num_heads = num_heads
        self.num_super_token_iters = num_super_token_iters
        self.sim_scale = 1 / math.sqrt(ch)
        # modules
        # NOTE(review): moving submodules to cuda inside __init__ is kept for
        # backward compatibility, but prefer constructing the module and then
        # moving the whole module to the target device from the outside.
        self.pos_embed = FourierEmbed(ch, dim, pe_temperature).to("cuda")
        self.attn = nn.MultiheadAttention(ch, num_heads=num_heads, dropout=dropout).to("cuda")
        self.attn_norm = nn.LayerNorm(ch).to("cuda")
        # debug
        self.debug = False

    def set_debug(self, value: bool):
        # enable/disable debug collection and reset any previously stored state
        self.debug = value
        self.debug_states = {}

    def get_debug_info(self):
        return self.debug_states

    def forward(
        self, 
        token_features: torch.Tensor,
        token_positions: torch.Tensor,
        super_token_src: torch.Tensor, 
        batch_super_token_ids: list[torch.Tensor], 
        linked_pairs: torch.Tensor, 
        num_linked_super_tokens: torch.Tensor,
        num_linked_tokens: torch.Tensor,
        batch_size: int,
    ) -> torch.Tensor:
        """
        Args:
            token_features (T x F): raw token features
            token_positions (T x D): raw token positions
            super_token_src (T): super token index of each token
            batch_super_token_ids (list[torch.Tensor]): super token indices for each batch
            linked_pairs (N x 2): N [token, super_token] index pairs (N > T)
            num_linked_super_tokens (T): number of linked super tokens of each token
            num_linked_tokens (S): number of linked tokens of each super token
            batch_size (int): number of batches

        Returns:
            token_features (T x F): token features after sparse super-token attention
        """
        D = self.dim
        T, F = token_features.shape
        N = linked_pairs.shape[0]
        S = sum(b.shape[0] for b in batch_super_token_ids)
        B = batch_size
        device = token_features.device
        dtype = token_features.dtype

        zoom = S / T
        residual = token_features

        # initialize super tokens as the mean of their member tokens
        with torch.no_grad():
            st_feat = torch.zeros(S, F, device=device, dtype=dtype)
            st_feat = st_feat.index_reduce_(0, super_token_src, token_features, 'mean', include_self=False)

        # iteratively refine token <-> super-token association weights (no grad)
        with torch.no_grad():
            sim = torch.zeros(N, device=device, dtype=dtype)       # raw pair similarities
            sim_s = torch.zeros(N, device=device, dtype=dtype)     # weights normalized per super token
            weight_s = torch.zeros(S, device=device, dtype=dtype)  # per-super-token normalizers
            sim_t = torch.zeros(N, device=device, dtype=dtype)     # weights normalized per token
            weight_t = torch.zeros(T, device=device, dtype=dtype)  # per-token normalizers

            for idx in range(self.num_super_token_iters):
                # BUG FIX: softmax_Q accumulates into sim / weight_s / weight_t,
                # so they must be cleared before every iteration; previously the
                # stale sums from earlier iterations corrupted the normalization
                # whenever num_super_token_iters > 1.
                sim.zero_()
                weight_s.zero_()
                weight_t.zero_()
                softmax_Q(token_features, st_feat, linked_pairs, sim, sim_s, sim_t, weight_s, weight_t, 1e-7, self.sim_scale)
                ti.sync()
                if idx < self.num_super_token_iters - 1:
                    # fresh accumulator: the kernel adds into st_feat
                    st_feat = torch.zeros(S, F, device=device, dtype=dtype)
                    iter_st_forward(token_features, st_feat, linked_pairs, sim_s)
                    ti.sync()
            # aggregate positions into super-token positions with the final weights
            st_pos = torch.zeros(S, D, device=device, dtype=dtype)
            iter_st_forward(token_positions, st_pos, linked_pairs, sim_s)

        # aggregate token features into super tokens, this time with autograd
        st_feat = torch.zeros(S, F, device=device, dtype=dtype)  # accumulator, zeroed again inside apply
        st_feat = iter_st_or_upsample_t.apply(token_features, st_feat, linked_pairs, sim_s)

        # positional embedding
        st_feat = (self.pos_embed(st_pos) + st_feat).contiguous().to(device)

        # global attention over super tokens, batched with padding masks:
        # batched_st is [max_len x B x C], attn_mask is [(B * heads) x max_len x max_len]
        batched_st, attn_mask = pad_batched_tokens(st_feat, batch_super_token_ids, self.num_heads)
        batched_st, _ = self.attn(batched_st, batched_st, batched_st, attn_mask=attn_mask)
        # scatter the padded per-batch outputs back into the flat super-token layout
        st_feat = torch.zeros_like(st_feat, requires_grad=True)
        for b in range(B):
            st_feat = st_feat.index_add(0, batch_super_token_ids[b], batched_st[:len(batch_super_token_ids[b]), b])

        # upsample super tokens back to tokens
        token_features = torch.zeros(T, F, device=device, dtype=dtype)
        if self.use_same_association:
            # reuse the super-token-normalized weights; rescale by the average
            # number of tokens per super token (S / T)
            token_features = iter_st_or_upsample_t.apply(st_feat, token_features, linked_pairs, sim_s, True)
            token_features = token_features / zoom
        else:
            token_features = iter_st_or_upsample_t.apply(st_feat, token_features, linked_pairs, sim_t, True)

        token_features = self.attn_norm(token_features + residual)

        return token_features



