import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, reduce, repeat
from einops.layers.torch import Reduce, Rearrange
from timm.layers import DropPath


def precompute_rpe(dim: int, max_len=640, theta=10000.0):
    """Precompute a table of unit complex phases for rotary position encoding.

    Returns a complex tensor of shape (max_len, dim // 2) whose entry (t, j)
    is exp(i * t * f_j), with frequencies f_j = 1 / theta**(2j / dim).
    """
    half = dim // 2
    exponents = torch.arange(0, dim, 2)[:half].float() / dim
    inv_freq = 1.0 / theta ** exponents
    positions = torch.arange(max_len, device=inv_freq.device)
    angles = (positions[:, None] * inv_freq[None, :]).float()
    # Unit magnitude, position-dependent angle -> pure rotations.
    return torch.polar(torch.ones_like(angles), angles)


def apply_rel_pe_qk(xq, xk, pos, rpe):
    """Rotate query and key vectors by position-dependent complex phases.

    Adjacent feature pairs are viewed as complex numbers and multiplied by
    the precomputed phases rpe[pos].  Both inputs come in with trailing dim
    d; both outputs are flattened to ((b*t*c), d).
    """
    d = xq.shape[-1]
    phases = rpe[pos.reshape(-1), :]  # ((b*t*c), d//2) complex

    def rotate(t):
        pairs = t.float().reshape(-1, d // 2, 2)
        rotated = torch.view_as_complex(pairs) * phases
        return torch.view_as_real(rotated).type_as(t).flatten(1)

    return rotate(xq), rotate(xk)


def precompute_ape(d_model, max_len=640, theta=10000.0):
    """Sinusoidal absolute positional-encoding table of shape (max_len, d_model).

    Even feature columns carry sin(pos * f), odd columns cos(pos * f), with
    geometrically spaced frequencies as in the original Transformer.
    """
    pe = torch.zeros(max_len, d_model)
    positions = torch.arange(max_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(theta) / d_model))
    angles = positions * div_term
    pe[:, 0::2] = torch.sin(angles)
    pe[:, 1::2] = torch.cos(angles)
    return pe


def apply_abs_pe(x, pos, ape):
    """Add absolute positional encodings to x, looked up per entry of pos.

    pos has shape (b, t, c); each entry indexes a row of the table ape.
    """
    gathered = ape[pos.reshape(-1)]
    return x + gathered.view(*pos.shape, -1)


def apply_abs_pe_qk(xq, xk, pos, ape):
    """Add absolute positional encodings to q and k.

    pos has shape (b, t, c); both results are returned flattened to
    ((b*t*c), d), matching apply_rel_pe_qk's output layout.
    """
    pe = ape[pos.reshape(-1), :]
    flat_q = xq.flatten(0, 2)
    flat_k = xk.flatten(0, 2)
    return flat_q + pe, flat_k + pe


# Dispatch table: positional-encoding type -> function applied to (q, k).
# 'rel' multiplies by unit complex phases (rotary); 'abs' adds sinusoidal PE.
PE_QK_FUNC = {'rel': apply_rel_pe_qk, 'abs': apply_abs_pe_qk}


class LayerScale(nn.Module):
    """Learnable per-channel scaling of the last dimension (CaiT-style).

    gamma starts near zero so residual branches initially contribute little.
    """

    def __init__(self, dim: int, init_values=1e-5):
        super().__init__()
        self.gamma = nn.Parameter(torch.full((dim,), float(init_values)))

    def forward(self, x):
        """Scale x element-wise along its last dimension."""
        return self.gamma * x

class MLP(nn.Module):
    """Transformer feed-forward block: Linear -> GELU -> Linear with residual.

    Supports pre-norm (norm_first=True) or post-norm residual wiring, an
    optional LayerScale on the branch output, and stochastic depth (DropPath).
    """

    def __init__(self, d_model, r_hid, drop=0.1, norm_first=True, layer_scale=True):
        super().__init__()
        scale = LayerScale(d_model) if layer_scale else nn.Identity()
        self.net = nn.Sequential(
            nn.Linear(d_model, d_model * r_hid),
            nn.GELU(),
            nn.Linear(d_model * r_hid, d_model),
            scale,
            DropPath(drop),
        )
        self.norm = nn.LayerNorm(d_model)
        self.norm_first = norm_first

    def forward(self, x, x_mask=None):
        """Residual feed-forward; x_mask is accepted for interface parity but unused."""
        if self.norm_first:
            return x + self.net(self.norm(x))
        return self.norm(x + self.net(x))
    
class SwiGLUMLP(nn.Module):
    """Feed-forward block with a SwiGLU gate: (SiLU(x W1) * x W2) W3.

    The hidden width carries a 2/3 factor so parameter count stays close to a
    standard MLP with expansion ratio r_hid.  Supports pre-/post-norm residual
    wiring, optional LayerScale, and stochastic depth (DropPath).
    """

    def __init__(self, d_model, r_hid, drop=0.1, norm_first=True, layer_scale=True):
        super().__init__()
        # 2/3 scaling keeps parameters comparable to a plain r_hid expansion.
        hidden_dim = int(d_model * r_hid * 2 / 3)

        self.w1 = nn.Linear(d_model, hidden_dim)
        self.w2 = nn.Linear(d_model, hidden_dim)
        self.w3 = nn.Linear(hidden_dim, d_model)
        self.act = nn.SiLU()
        self.drop = nn.Dropout(drop)

        self.norm = nn.LayerNorm(d_model)
        self.norm_first = norm_first

        # LayerScale and DropPath are applied to the SwiGLU branch output.
        self.layer_scale = LayerScale(d_model) if layer_scale else nn.Identity()
        self.drop_path = DropPath(drop) if drop > 0. else nn.Identity()

        # Explicit small-gain init to counter the variance growth introduced
        # by the element-wise gate product.
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Xavier-uniform with gain 0.5 on every Linear; biases start at zero."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight, gain=0.5)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def _swiglu_forward(self, x):
        """Core SwiGLU computation: dropout((SiLU(x W1) * x W2) W3)."""
        gate = self.act(self.w1(x))
        value = self.w2(x)
        return self.drop(self.w3(gate * value))

    def forward(self, x, x_mask=None):
        """Residual SwiGLU block; x_mask is accepted but unused."""
        if self.norm_first:
            # Pre-norm: x + DropPath(LayerScale(SwiGLU(Norm(x))))
            branch = self._swiglu_forward(self.norm(x))
            return x + self.drop_path(self.layer_scale(branch))
        # Post-norm: Norm(x + DropPath(LayerScale(SwiGLU(x))))
        branch = self._swiglu_forward(x)
        return self.norm(x + self.drop_path(self.layer_scale(branch)))

class TemporalAttn(nn.Module):
    """Single-head self-attention over the time axis, run per channel.

    x is (batch, time, channel, dim); attention mixes time positions within
    each (batch, channel) slice.  x_mask is (batch, time, channel) boolean,
    True = masked out.
    """

    def __init__(self, d_model, drop=0.1, norm_first=False, layer_scale=True):
        super().__init__()

        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.drop = nn.Dropout(drop)
        self.layer_norm = nn.LayerNorm(d_model)
        self.norm_first = norm_first
        self.layer_scale = LayerScale(d_model) if layer_scale else nn.Identity()
        # Single head: the whole model dim is used for sqrt(d) scaling.
        self.d_head = d_model

    def _attn_block(self, x, x_mask, pos, pe, pe_type='rel'):
        """Masked temporal attention; returns (output b t c d, attn b c tq tk)."""
        bsz, nt, nc, nd = x.shape

        if pe_type in PE_QK_FUNC:
            # PE helpers return q/k flattened to ((b*t*c), d).
            q, k = PE_QK_FUNC[pe_type](self.wq(x), self.wk(x), pos, pe)
            q = q.view(bsz, nt, nc, -1).transpose(1, 2).reshape(bsz * nc, nt, -1)
            k = k.view(bsz, nt, nc, -1).transpose(1, 2).reshape(bsz * nc, nt, -1)
        else:
            q = self.wq(x).transpose(1, 2).reshape(bsz * nc, nt, nd)
            k = self.wk(x).transpose(1, 2).reshape(bsz * nc, nt, nd)

        v = self.wv(x).transpose(1, 2)  # b c t d

        scores = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.d_head)
        scores = scores.reshape(bsz, nc, nt, nt)

        # A (tq, tk) pair is disallowed when either time position is masked.
        pad = x_mask.transpose(1, 2)  # b c t
        pair_mask = pad[:, :, :, None] | pad[:, :, None, :]
        scores = scores.masked_fill(pair_mask, float('-inf'))
        # Fully-masked rows softmax to NaN; replace those with zeros.
        attn = self.drop(F.softmax(scores, -1).nan_to_num(0))

        out = torch.einsum("bcmn,bcnd->bmcd", attn, v)

        return out, attn

    def forward(self, x, x_mask, pos, pe, pe_type='rel'):
        """Residual attention block (pre- or post-norm); returns (out, attn)."""
        if self.norm_first:
            out, attn = self._attn_block(self.layer_norm(x), x_mask, pos, pe,
                                         pe_type)
            out = x + self.layer_scale(out)
        else:
            out, attn = self._attn_block(x, x_mask, pos, pe, pe_type)
            out = self.layer_norm(x + self.layer_scale(self.drop(out)))

        return out, attn


class TokenMixingAttn(nn.Module):
    """Temporal attention with an optional channel-token remixing ("moe") path.

    x is laid out as (batch, time, channel, dim), matching the einops
    patterns below; attention mixes time steps independently per channel.
    Two masks are used: p_mask builds the -inf attention mask, while x_mask
    additionally down-weights attention paid to masked key positions.
    """

    def __init__(self, d_model, drop=0.1, norm_first=False, layer_scale=True):
        super().__init__()

        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.drop = nn.Dropout(drop)
        self.layer_norm = nn.LayerNorm(d_model)
        self.norm_first = norm_first
        if layer_scale:
            self.layer_scale = LayerScale(d_model)
        else:
            self.layer_scale = nn.Identity()
        # Single head: the whole model dim is used for sqrt(d) scaling.
        self.d_head = d_model

    def _attn_block(self, x, x_mask, p_mask, pos, pe, imp, idx_c, pe_type='rel',moe=False):
        """Masked temporal attention; returns (output b t c d, attn b c tq tk)."""
        bsz, nt, nc, nd = x.shape
        # Optional channel remix: gather channel tokens along dim 2 using
        # idx_c, substituting imp wherever x_mask is set.
        # NOTE(review): imp appears to carry replacement channel indices per
        # (batch, time) -- confirm against the caller before relying on this.
        if moe:
            imp = repeat(imp, "b t -> b t c", c=nc)
            idx_c = repeat(torch.where(x_mask, imp, idx_c[:, :nt, ...]),
                        'b t c -> b t c d',
                        d=nd)
            x = torch.gather(x, 2, idx_c)

        if pe_type in PE_QK_FUNC:
            # PE helpers return q/k flattened to ((b*t*c), d).
            xq, xk = PE_QK_FUNC[pe_type](self.wq(x), self.wk(x), pos, pe)
            xq = rearrange(xq, "(b t c) d -> (b c) t d", b=bsz, t=nt)
            xk = rearrange(xk, "(b t c) d -> (b c) t d", b=bsz, t=nt)
        else:
            xq, xk = self.wq(x), self.wk(x)
            xq = rearrange(xq, "b t c d -> (b c) t d")
            xk = rearrange(xk, "b t c d -> (b c) t d")

        xv = rearrange(self.wv(x), "b t c d -> b c t d")

        # Scaled dot-product scores between time steps, per (batch, channel).
        attn = rearrange(torch.matmul(xq, xk.transpose(1, 2)) /
                         math.sqrt(self.d_head),
                         "(b c) tq tk -> b c tq tk",
                         b=bsz)

        x_mask = x_mask.transpose(1, 2)
        p_mask = p_mask.transpose(1, 2)
        # A (tq, tk) pair is disallowed when either side is masked in p_mask;
        # fully-masked rows softmax to NaN and are zeroed.
        mask = p_mask[:, :, :, None] | p_mask[:, :, None, :]
        attn = F.softmax(attn.masked_fill(mask, float('-inf')), -1).nan_to_num(0)

        # Keys flagged in x_mask contribute with weight 1/nt instead of 1.
        weight = torch.where(x_mask, 1 / nt, 1)
        weighted_attn = attn * weight[:, :, None, :]

        out = torch.einsum("bcmn,bcnd->bmcd", self.drop(weighted_attn), xv)
        return out, attn

    def forward(self,
                x,
                x_mask,
                p_mask,
                pos,
                pe,
                imp,
                idx_c,
                pe_type='rel',
                return_imp=False,moe=False):
        """Residual attention block; optionally also return per-step importance."""
        if self.norm_first:
            out, attn = self._attn_block(self.layer_norm(x), x_mask, p_mask, pos, pe,
                                         imp, idx_c, pe_type,moe)
            out = x + self.layer_scale(out)
        else:
            out, attn = self._attn_block(x, x_mask, p_mask, pos, pe, imp, idx_c,
                                         pe_type,moe)
            out = self.layer_norm(x + self.layer_scale(self.drop(out)))
        if return_imp:
            # Importance = attention from the first query position to keys
            # 1.., summed over channels.
            # NOTE(review): attn is already softmax-normalised inside
            # _attn_block; this applies softmax a second time -- confirm
            # the re-normalisation is intended.
            imp = F.softmax(attn, -1).nan_to_num(0)[:, :, 0, 1:].sum(1)  # b t
            return out, imp
        else:
            return out


class ChannelAttn(nn.Module):

    def __init__(self, d_model, drop=0.1, norm_first=False, layer_scale=True):
        super().__init__()
        self.d_head = d_model
        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.drop = nn.Dropout(drop)
        self.layer_norm = nn.LayerNorm(d_model)
        self.norm_first = norm_first
        if layer_scale:
            self.layer_scale = LayerScale(d_model)
        else:
            self.layer_scale = nn.Identity()

    def _attn_block(self, x, x_mask):
        xq = self.wq(x)
        xk = self.wk(x)
        xv = self.wv(x)
        attn = torch.einsum("btqd,btkd->btqk", xq, xk) / math.sqrt(self.d_head)
        attn_mask = x_mask[:, :, None, :] | x_mask[:, :, :, None]
        attn = torch.masked_fill(attn, attn_mask, float('-inf'))
        attn = self.drop(F.softmax(attn, -1).nan_to_num(0))
        out = torch.einsum("btqk,btkd->btqd", attn, xv)
        return out

    def forward(self, x, x_mask, *args, **kwargs):
        if self.norm_first:
            out = self._attn_block(self.layer_norm(x), x_mask)
            out = x + self.layer_scale(out)
        else:
            out = self._attn_block(x, x_mask)
            out = self.layer_norm(x + self.layer_scale(self.drop(out)))
        return out


class CLSHead(nn.Module):
    """Classification head over pooled features concatenated with static features.

    With static features (d_static > 0) a two-layer GELU MLP with dropout is
    used; otherwise a single linear layer maps straight to the logits.
    """

    def __init__(self, d_model, d_static, num_cls, drop=0.1):
        super().__init__()
        d_in = d_model + d_static
        if d_static > 0:
            hidden = d_model * 4
            self.net = nn.Sequential(
                nn.Linear(d_in, hidden),
                nn.GELU(),
                nn.Dropout(drop),
                nn.Linear(hidden, num_cls),
            )
            for layer in (self.net[0], self.net[3]):
                nn.init.xavier_uniform_(layer.weight)
        else:
            self.net = nn.Linear(d_in, num_cls)
            nn.init.xavier_uniform_(self.net.weight)

    def forward(self, x):
        """x: (batch, d_model + d_static) -> logits of shape (batch, num_cls)."""
        return self.net(x)
