import sys

sys.path.insert(0, "")

import torch
import torch.nn as nn
import torch.nn.functional as F

from model.activation import activation_factory


class TemporalConv(nn.Module):
    """Temporal (time-axis) convolution followed by batch normalization.

    Applies a (kernel_size x 1) Conv2d over the temporal dimension of an
    (N, C, T, V) tensor; padding is chosen so T is preserved when
    stride == 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1):
        super().__init__()
        # "Same"-style padding for the dilated temporal kernel.
        effective_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
        pad = (effective_kernel - 1) // 2
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=(kernel_size, 1),
            padding=(pad, 0),
            stride=(stride, 1),
            dilation=(dilation, 1),
        )
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Convolve along time, then normalize; (N, C, T, V) in and out."""
        return self.bn(self.conv(x))


class MultiScale_TemporalConv(nn.Module):
    """Multi-scale temporal convolution (MS-TCN) block.

    ``out_channels`` is split evenly across ``len(dilations) + 2`` parallel
    branches: one dilated :class:`TemporalConv` per dilation rate, a 3x1
    max-pool branch, and a strided 1x1 conv branch. Branch outputs are
    concatenated on the channel axis, summed with a residual connection,
    and passed through the activation.

    Input/output shape: (N, C, T, V).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        dilations=(1, 2),  # immutable default: a list default is shared across calls
        residual=True,
        residual_kernel_size=1,
        activation="relu",
    ):
        super().__init__()
        assert out_channels % (len(dilations) + 2) == 0, (
            "# out channels should be multiples of # branches"
        )

        # Multiple branches of temporal convolution.
        self.num_branches = len(dilations) + 2
        branch_channels = out_channels // self.num_branches

        # One dilated temporal-conv branch per dilation rate.
        self.branches = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
                    nn.BatchNorm2d(branch_channels),
                    activation_factory(activation),
                    TemporalConv(
                        branch_channels,
                        branch_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        dilation=dilation,
                    ),
                )
                for dilation in dilations
            ]
        )

        # Additional max-pool branch.
        self.branches.append(
            nn.Sequential(
                nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
                nn.BatchNorm2d(branch_channels),
                activation_factory(activation),
                nn.MaxPool2d(kernel_size=(3, 1), stride=(stride, 1), padding=(1, 0)),
                nn.BatchNorm2d(branch_channels),
            )
        )

        # Additional 1x1 conv branch (applies the temporal stride itself).
        self.branches.append(
            nn.Sequential(
                nn.Conv2d(
                    in_channels,
                    branch_channels,
                    kernel_size=1,
                    padding=0,
                    stride=(stride, 1),
                ),
                nn.BatchNorm2d(branch_channels),
            )
        )

        # Residual connection: identity when shapes already match,
        # a strided TemporalConv projection otherwise, or 0 when disabled.
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = TemporalConv(
                in_channels,
                out_channels,
                kernel_size=residual_kernel_size,
                stride=stride,
            )

        self.act = activation_factory(activation)

    def forward(self, x):
        """Run every branch on x (N, C, T, V), concatenate, add residual."""
        res = self.residual(x)
        branch_outs = [branch(x) for branch in self.branches]

        out = torch.cat(branch_outs, dim=1)
        out += res
        out = self.act(out)
        return out


class LinearSelfAttentionTemporal(nn.Module):
    """Temporal self-attention with linear complexity (TSSA-style).

    The input is projected with a dilated temporal convolution, split into
    heads, and the attention statistics are accumulated with cumulative
    sums, so cost grows linearly with the token count (T * V).

    Input: (N, C, T, V); output: (N, out_channels, T', V), where T' is the
    temporal length after the (possibly strided) projection convolution.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        dilation=1,
        n_head=8,
        dropout=0,
        activation="relu6",
    ):
        super().__init__()
        # Fail fast: forward() reshapes channels into (n_head, head_dim),
        # which silently requires this divisibility; without the check an
        # invalid combination only errors with a cryptic view() failure.
        assert out_channels % n_head == 0, "out_channels must be divisible by n_head"

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_head = n_head
        self.head_dim = out_channels // n_head

        # Dilated temporal convolution replaces a plain 1x1 projection.
        pad = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2
        self.c_attn = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=(kernel_size, 1),
            padding=(pad, 0),
            stride=(stride, 1),
            dilation=(dilation, 1),
        )

        self.c_proj = nn.Conv2d(out_channels, out_channels, kernel_size=1)

        # TSSA-specific learnable parameters: per-head temperature and
        # denominator bias.
        self.temp = nn.Parameter(torch.ones(n_head, 1))
        self.denom_bias = nn.Parameter(torch.zeros(n_head, 1, 1))

        # Regularization.
        self.attn_dropout = nn.Dropout(dropout)
        self.resid_dropout = nn.Dropout(dropout)

        # Output activation.
        self.act = activation_factory(activation)

    def forward(self, x):
        N, C, T, V = x.size()

        # Project the input (dilated temporal conv); stride may shrink T.
        w = self.c_attn(x)  # (N, C', T', V)
        _, _, T, _ = w.size()  # update T to the post-conv value

        # Flatten (T, V) into one token axis per head.
        w = w.view(N, self.n_head, self.head_dim, T, V)
        w = w.permute(0, 1, 3, 4, 2)  # (N, n_head, T, V, head_dim)
        w = w.reshape(N, self.n_head, T * V, self.head_dim)

        # Linear attention statistics via cumulative sums; clamps guard
        # against division by zero.
        w_sq = w**2
        denom = torch.cumsum(w_sq, dim=2).clamp_min(1e-12)
        w_normed = (w_sq / denom) + self.denom_bias

        tmp = torch.sum(w_normed, dim=-1) * self.temp
        Pi = F.softmax(tmp, dim=1)

        dots = torch.cumsum(w_sq * Pi.unsqueeze(-1), dim=2) / (
            Pi.cumsum(dim=2).clamp_min(1e-8)
        ).unsqueeze(-1)

        attn = 1.0 / (1 + dots)
        attn = self.attn_dropout(attn)

        y = -torch.mul(w.mul(Pi.unsqueeze(-1)), attn)

        # Reshape back to the original (N, C', T, V) layout.
        y = y.reshape(N, self.n_head, T, V, self.head_dim)
        y = y.permute(0, 1, 4, 2, 3).reshape(N, self.out_channels, T, V)

        # Output projection and dropout.
        y = self.c_proj(y)
        y = self.resid_dropout(y)

        # Activation.
        y = self.act(y)

        return y


class MultiScale_LinearSelfAttention(nn.Module):
    """Multi-scale linear self-attention block (MS-TCN-style branching).

    ``out_channels`` is split evenly across ``len(dilations) + 1`` parallel
    branches: one :class:`LinearSelfAttentionTemporal` per dilation rate
    plus a 3x1 max-pool branch. Branch outputs are concatenated on the
    channel axis, summed with an optional residual, and activated.

    Input/output shape: (N, C, T, V).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        dilations=(1, 2, 3, 4),  # tuple default: a list would be shared across calls
        n_head=8,
        dropout=0.1,
        residual=False,
        activation="relu6",
    ):
        super().__init__()
        # The channel split below requires an even division across branches.
        assert out_channels % (len(dilations) + 1) == 0, "输出通道数应是分支数的倍数"

        # Channels per branch.
        self.num_branches = len(dilations) + 1
        branch_channels = out_channels // self.num_branches

        # One linear self-attention branch per dilation rate (mirrors the
        # MS-TCN dilation scheme).
        self.branches = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(in_channels, branch_channels, kernel_size=1),
                    nn.BatchNorm2d(branch_channels),
                    activation_factory(activation),
                    # TSSA variant that supports dilated projection convs.
                    LinearSelfAttentionTemporal(
                        branch_channels,
                        branch_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        dilation=dilation,
                        n_head=n_head,
                        dropout=dropout,
                    ),
                )
                for dilation in dilations
            ]
        )

        # Additional max-pool branch.
        self.branches.append(
            nn.Sequential(
                nn.Conv2d(in_channels, branch_channels, kernel_size=1),
                nn.BatchNorm2d(branch_channels),
                activation_factory(activation),
                nn.MaxPool2d(kernel_size=(3, 1), stride=(stride, 1), padding=(1, 0)),
                nn.BatchNorm2d(branch_channels),
            )
        )

        # Residual connection: 0 when disabled, identity when shapes match,
        # otherwise a 1x1 projection with temporal average pooling for the
        # stride.
        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1),
                nn.AvgPool2d(kernel_size=(stride, 1), stride=(stride, 1))
                if stride > 1
                else nn.Identity(),
            )

        self.act = activation_factory(activation)

    def forward(self, x):
        """Run every branch on x (N, C, T, V), concatenate, add residual."""
        res = self.residual(x)

        # Apply each branch to the same input.
        branch_outs = [branch(x) for branch in self.branches]

        # Concatenate branch outputs along the channel axis.
        out = torch.cat(branch_outs, dim=1)

        # Residual connection and activation.
        out += res
        out = self.act(out)

        return out


if __name__ == "__main__":
    # Smoke test: run one forward pass and report parameter counts.
    mstcn = MultiScale_TemporalConv(288, 288)
    x = torch.randn(32, 288, 100, 20)
    # Call the module itself rather than .forward() so nn.Module.__call__
    # runs (hooks, etc.).
    mstcn(x)
    for name, param in mstcn.named_parameters():
        print(f"{name}: {param.numel()}")
    print(sum(p.numel() for p in mstcn.parameters() if p.requires_grad))
