import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from ..base_model import BaseModel
from ..modules import SetBlockWrapper, BasicConv2d
from ..basic_blocks import SetBlock, GFA, CvT_layer
from ..self_attention import Attention
from ..gcn import Graph

# Fusion of the CSquare module with MSAFF; known open issue in the loss computation.


class MsaffGait6LC2(BaseModel):
    """Multi-modal gait recognition model (CSquare + MSAFF fusion).

    Two branches are fused for metric learning:
      * a silhouette (image) branch built from plain CNN blocks, and
      * a skeleton (pose) branch built from graph-transformer layers
        (``CvT_layer`` over a COCO skeleton adjacency matrix).
    Part-level features from both branches are combined by a CSquare-style
    fusion module (C²Fusion) and cross-modal attention, then projected by
    per-part linear maps (``fc_bin*``) and concatenated into the embedding
    used by the triplet loss.
    """

    def __init__(self, cfgs, is_training):
        super().__init__(cfgs, is_training)

    def _init_feature_processor(self, in_channels=32, squeeze_ratio=16):
        """Build the components of the former ``FeatureProcessor`` on this model.

        Args:
            in_channels: channel dimension seen by the 1-D attention convs
                (dim 1 of the inputs passed to :meth:`_feature_processor`).
            squeeze_ratio: bottleneck reduction ratio of the attention MLPs.
        """
        hidden_dim = in_channels // squeeze_ratio

        # CSquare attention: one bottleneck 1x1-conv MLP per modality.
        self.conv1 = nn.Sequential(
            nn.Conv1d(in_channels, hidden_dim, kernel_size=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim, in_channels, kernel_size=1),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(in_channels, hidden_dim, kernel_size=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim, in_channels, kernel_size=1),
        )

        # CatFusion layer: projects the concatenation of the two attended
        # inputs plus their fused mean (3 * in_channels) back to in_channels.
        # NOTE: stride=2 with output_padding=1 makes this transposed conv
        # double the length (last) dimension of its input.
        self.fusion_conv = nn.ConvTranspose1d(
            in_channels * 3,
            in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            output_padding=1
        )

    def _feature_processor(self, x1, x2):
        """Forward pass of the former ``FeatureProcessor`` (C²Fusion).

        Computes per-modality attention maps, takes their softmax over the
        modality axis, gates the averaged features with the element-wise
        minimum of the two softmaxed maps, and fuses everything through
        ``fusion_conv``.

        NOTE(review): dim 0 of ``x1``/``x2`` here is the *part* dimension
        (e.g. 48 image parts vs 51 skeleton parts), not the batch, so the
        ``min`` truncation below silently drops the trailing skeleton parts
        — confirm this is intended.
        """
        min_batch = min(x1.shape[0], x2.shape[0])
        x1, x2 = x1[:min_batch], x2[:min_batch]

        attn_x1 = self.conv1(x1)
        attn_x2 = self.conv2(x2)
        # Softmax across the two modalities so the maps compete per element.
        attn_x = torch.stack((attn_x1, attn_x2), dim=1)
        attn_x = F.softmax(attn_x, dim=1)
        attn_x1_softmax = attn_x[:, 0, ...]
        attn_x2_softmax = attn_x[:, 1, ...]
        # Element-wise minimum = "agreement" gate between the two modalities.
        attn = torch.min(attn_x1_softmax, attn_x2_softmax)

        out = (x1 + x2) / 2 * attn
        fused_out = self.fusion_conv(
            torch.cat([x1 * attn_x1_softmax, out, x2 * attn_x2_softmax], dim=1)
        )
        return fused_out

    def build_network(self, model_cfg):
        graph = Graph("coco")
        A = torch.tensor(graph.A, dtype=torch.float32, requires_grad=False)
        # Buffer so the adjacency matrix moves with .to(device) but is not trained.
        self.register_buffer('A', A)

        self.hidden_dim = model_cfg['hidden_dim']
        self.part_img = model_cfg['part_img']
        self.part_ske = model_cfg['part_ske']
        _set_in_channels_img = model_cfg['set_in_channels_img']
        _set_in_channels_ske = model_cfg['set_in_channels_ske']
        _set_channels = model_cfg['set_channels']

        # Components of the C²Fusion feature processor.
        self._init_feature_processor(in_channels=_set_channels[0])

        # Silhouette branch (F network).
        self.set_block1 = nn.Sequential(BasicConv2d(_set_in_channels_img, _set_channels[0], 5, 1, 2),
                                        # conv 1: in -> set_channels[0], 5x5, size preserved
                                        nn.LeakyReLU(inplace=True),
                                        BasicConv2d(_set_channels[0], _set_channels[0], 3, 1, 1),
                                        # conv 2: same channels, 3x3, size preserved
                                        nn.LeakyReLU(inplace=True),
                                        nn.MaxPool2d(kernel_size=2, stride=2))  # 2x2 max-pool: spatial size halved

        self.set_block2 = nn.Sequential(BasicConv2d(_set_channels[0], _set_channels[1], 3, 1, 1),
                                        # conv 3: channels[0] -> channels[1], 3x3, size preserved
                                        nn.LeakyReLU(inplace=True),
                                        BasicConv2d(_set_channels[1], _set_channels[1], 3, 1, 1),
                                        # conv 4: same channels, 3x3, size preserved
                                        nn.LeakyReLU(inplace=True),
                                        nn.MaxPool2d(kernel_size=2, stride=2))  # 2x2 max-pool: spatial size halved again

        self.set_block3 = nn.Sequential(BasicConv2d(_set_channels[1], _set_channels[2], 3, 1, 1),
                                        # conv 5: channels[1] -> channels[2], 3x3, size preserved
                                        nn.LeakyReLU(inplace=True),
                                        BasicConv2d(_set_channels[2], _set_channels[2], 3, 1, 1),
                                        # same channels, 3x3, size preserved
                                        nn.LeakyReLU(inplace=True))  # after both pools the maps are H/4 x W/4

        self.set_block1 = SetBlockWrapper(self.set_block1)
        self.set_block2 = SetBlockWrapper(self.set_block2)
        self.set_block3 = SetBlockWrapper(self.set_block3)

        # Skeleton branch (GT network): CvT_layer is a graph-convolution
        # transformer that models joint relations through the adjacency A.
        self.layer1 = SetBlock(
            CvT_layer(image_size=(1, 17), in_channels=_set_in_channels_ske, dim=_set_channels[0], heads=1, A=A, depth=1,
                      kernels=1,
                      strides=1, pad=0), pooling=False)  # pose channels -> set_channels[0]
        self.layer2 = SetBlock(
            CvT_layer(image_size=(1, 17), in_channels=_set_channels[0], dim=_set_channels[0], heads=2, A=A, depth=2,
                      kernels=1,
                      strides=1, pad=0), pooling=False)  # set_channels[0] -> set_channels[0]
        self.layer3 = SetBlock(
            CvT_layer(image_size=(1, 17), in_channels=_set_channels[1], dim=_set_channels[0], heads=4, A=A, depth=2,
                      kernels=1,
                      strides=1, pad=0), pooling=False)  # NOTE(review): in_channels=_set_channels[1] while the
        # previous layer outputs _set_channels[0] features — confirm CvT_layer's dim/in_channels contract.

        # Global part aggregation (GMPA): multi-part pooling per modality.
        self.set_pool0 = GFA(self.part_img, _set_channels[2], _set_channels[2])
        self.set_pool1 = GFA(self.part_ske, _set_channels[2], _set_channels[2])
        self.set_pool2 = GFA(self.part_img, _set_channels[2], _set_channels[2])

        # Dynamic attention (LCMB).
        self.atten = Attention(_set_channels[2])   # cross-modal attention
        self.atten1 = Attention(_set_channels[2])  # intra-modal attention

        # Per-part projection matrices mapping each feature stream into a
        # common embedding space of size hidden_dim.
        self.fc_bin = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2] * 2, self.hidden_dim)))
        self.fc_bin1 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2], self.hidden_dim)))
        self.fc_bin2 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2], self.hidden_dim)))
        self.fc_bin3 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_ske * 3, _set_channels[2], self.hidden_dim)))
        # 1x1 conv that halves the channel dim after cross-modal concatenation.
        self.full = BasicConv2d(_set_channels[2] * 2, _set_channels[2], 1, 1, 0)

    def hp(self, f):
        """Pool dim 0 away by summing its mean and max (mean+max pooling)."""
        feature = f.mean(0) + f.max(0)[0]
        return feature

    def ske_hp(self, f, view):
        """Pool skeleton features over dim 0 and broadcast to ``view``'s shape
        so they can be added to the image-branch features."""
        f = self.hp(f).expand(view.size())
        return f

    def forward(self, inputs):
        ipts, labs, _, _, seqL = inputs

        # Input unpacking.
        # n = batch, s = frames, h/w = silhouette height/width.
        sils = ipts[0][0]  # input 1: gait silhouettes [n,s,h,w], e.g. [32,30,64,44] (X_img)
        # n = batch, s = frames, z = joints, c = coordinate channels.
        pose = ipts[1][0]  # input 2: skeleton keypoints [n,s,z,c], e.g. [32,30,17,3] (X_ske)

        # Insert channel dims.
        x = sils.unsqueeze(2)  # [n,s,h,w] -> [n,s,1,h,w]
        y = pose.unsqueeze(2).permute(0, 1, 4, 2, 3)  # [n,s,z,c] -> [n,s,1,z,c] -> [n,s,c,1,z]

        # Silhouette branch.
        x_1_s = self.set_block1(x)      # [n,s,c,h,w] -> [n,s,c,h/2,w/2]
        x_1_s = self.set_block2(x_1_s)  # -> [n,s,c,h/4,w/4]
        x_1_s = self.set_block3(x_1_s).permute(4, 3, 0, 2,
                                               1)  # -> [w/4, h/4, n, c, s]

        x_1_s = self.hp(x_1_s)          # pool width away: [h/4, n, c, s] (S_img, frame-level silhouette features)
        x_1 = self.set_pool0(x_1_s)     # 3-scale part pooling: [3*(h/4), n, c] (P_img, part-level features)

        # Skeleton branch.
        y_1_s = self.layer1(y)          # [n,s,c,1,z] -> [n,s,ch0,1,z]
        y_1_s = self.layer2(y_1_s)      # -> [n,s,ch0,1,z]
        y_1_s = self.layer3(y_1_s)      # -> [n,s,ch2,1,z]
        y_1_s = y_1_s.permute(4, 3, 0, 2, 1)          # -> [z,1,n,c,s]
        y_1_s = y_1_s.contiguous().squeeze(1)         # -> [z,n,c,s] (S_ske, frame-level skeleton features)

        y_1 = self.set_pool1(y_1_s)     # 3-scale part pooling: [3*z, n, c] (P_ske, part-level features)

        # C²Fusion of the two part-level feature sets.
        x_2 = self._feature_processor(x_1, y_1)
        # (A gradient assert here was removed: under torch.no_grad() the
        # output legitimately has requires_grad=False, so it broke inference.)

        p, n, c, s = x_1_s.size()  # [h/4, n, c, s]
        k, n, c, s = y_1_s.size()  # [z, n, c, s]

        # MSSTFE (multi-scale spatio-temporal feature extraction): concat the
        # silhouette features with cross-modal attention output plus the
        # broadcast-pooled skeleton features (ST_fst fusion).
        x_3 = torch.cat([x_1_s,
                         self.atten(
                             x_1_s.permute(0, 1, 3, 2).contiguous().view(p, n * s, c),
                             # [h/4,n,c,s] -> [h/4,n,s,c] -> [h/4, n*s, c]
                             y_1_s.permute(0, 1, 3, 2).contiguous().view(k, n * s, c)
                             # [z,n,c,s] -> [z,n,s,c] -> [z, n*s, c]
                         ).view(p, n, s, c).permute(0, 1, 3, 2).contiguous() + self.ske_hp(y_1_s, x_1_s)],
                        2)  # -> [p, n, 2c, s]

        # FD pooling: 1x1-conv channel reduction, then 3-scale part pooling.
        x_3 = self.full(x_3.permute(1, 2, 0, 3).contiguous()).permute(2, 0, 1,
                                                                      3).contiguous()  # [n,2c,p,s] -> [n,c,p,s] -> [p,n,c,s]

        x_3 = self.set_pool2(x_3)  # -> [3*p, n, c]

        # Per-part projections into the embedding space (loss inputs).
        x_2 = x_2.matmul(self.fc_bin)   # [parts, n, 2c] x [parts, 2c, d] -> [parts, n, d]
        x_3 = x_3.matmul(self.fc_bin1)  # [parts, n, c] x [parts, c, d] -> [parts, n, d]
        x_1 = x_1.matmul(self.fc_bin2)  # [parts, n, c] x [parts, c, d] -> [parts, n, d]
        x_4 = y_1.matmul(self.fc_bin3)  # [3*z, n, c] x [3*z, c, d] -> [3*z, n, d]

        # embed_1 feeds the joint (triplet) loss.
        embed_1 = torch.cat([x_1, x_2, x_3, x_4], 0)      # [sum(parts), n, d]
        embed_1 = embed_1.permute(1, 0, 2).contiguous()   # [n, parts, d]

        n, s, c, h, w = x.size()

        # Outputs for loss computation, logging, and inference.
        retval = {
            'training_feat': {
                'triplet': {'embeddings': embed_1, 'labels': labs},
            },
            'visual_summary': {
                'image/sils': x.view(n * s, 1, h, w)
            },
            'inference_feat': {
                'embeddings': embed_1
            }
        }
        return retval

