import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from ..base_model import BaseModel
from ..modules import SeparateBNNecks, SetBlockWrapper, BasicConv2d
from ..basic_blocks import SetBlock, GFA, CvT_layer
from ..self_attention import Attention
from ..gcn import Graph


import numpy as np  # NumPy for numerical computation
import torch  # redundant re-import: torch is already imported at the top of the file
from torch import nn  # redundant with the `import torch.nn as nn` above
from torch.nn import init  # weight-initialization helpers
from torch.nn import functional as F  # redundant with the `torch.nn.functional as F` import above

# Double Attention module (A^2-Net style gather-and-distribute attention)
class DoubleAttention(nn.Module):
    """Double attention block (A^2-Nets): gather global descriptors with one
    attention map, then distribute them back to every position with a second.

    Args:
        in_channels: number of channels of the input feature map.
        c_m: channels of the feature projection A (default 128).
        c_n: channels of the two attention projections B and V (default 128).
        reconstruct: if True, project the output back to ``in_channels``
            with a final 1x1 convolution.
    """

    def __init__(self, in_channels, c_m=128, c_n=128, reconstruct=True):
        super().__init__()
        self.in_channels = in_channels
        self.reconstruct = reconstruct
        self.c_m = c_m
        self.c_n = c_n
        # Three parallel 1x1 projections of the input:
        # A -> features, B -> gathering attention, V -> distributing attention.
        self.convA = nn.Conv2d(in_channels, c_m, 1)
        self.convB = nn.Conv2d(in_channels, c_n, 1)
        self.convV = nn.Conv2d(in_channels, c_n, 1)
        if self.reconstruct:
            # Map the c_m-channel result back to the input channel count.
            self.conv_reconstruct = nn.Conv2d(c_m, in_channels, kernel_size=1)
        self.init_weights()

    def init_weights(self):
        """Apply He init to convs, constants to norms, small normal to linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_normal_(module.weight, mode='fan_out')
                if module.bias is not None:
                    init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                init.normal_(module.weight, std=0.001)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Apply double attention to ``x`` of shape (b, in_channels, h, w).

        Returns a tensor of shape (b, in_channels, h, w) when
        ``reconstruct`` is True, otherwise (b, c_m, h, w).
        """
        batch, chans, height, width = x.shape
        assert chans == self.in_channels
        # Project and flatten the spatial dims to a single axis of h*w.
        feat_a = self.convA(x).view(batch, self.c_m, -1)                          # (b, c_m, h*w)
        attn_maps = F.softmax(self.convB(x).view(batch, self.c_n, -1), dim=-1)    # (b, c_n, h*w)
        attn_vecs = F.softmax(self.convV(x).view(batch, self.c_n, -1), dim=-1)    # (b, c_n, h*w)
        # Step 1 — feature gathering: second-order pooling of A weighted by B.
        descriptors = torch.bmm(feat_a, attn_maps.permute(0, 2, 1))               # (b, c_m, c_n)
        # Step 2 — feature distribution: spread descriptors back via V.
        z = descriptors.matmul(attn_vecs).view(batch, self.c_m, height, width)
        if self.reconstruct:
            z = self.conv_reconstruct(z)
        return z


class MsaffGaitL11(BaseModel):
    """Multi-modal gait recognition model fusing a silhouette branch and a
    skeleton branch with cross-modal attention.

    The silhouette branch is a three-stage CNN (``set_block1..3``); the
    skeleton branch is a three-layer graph-convolutional transformer
    (``CvT_layer`` over a COCO 17-joint graph).  Part pooling (``MCM``),
    cross-/intra-modal attention, and per-part linear projections
    (``fc_bin*``) produce the embeddings fed to the triplet loss.
    """

    def __init__(self, cfgs, is_training):
        super().__init__(cfgs, is_training)

    def build_network(self, model_cfg):
        """Build both branches, the attention blocks, and projection heads.

        ``model_cfg`` keys used: ``hidden_dim``, ``part_img``, ``part_ske``,
        ``set_in_channels_img``, ``set_in_channels_ske``, ``set_channels``.
        """
        # Skeleton adjacency matrix (COCO layout); registered as a buffer so
        # it moves with the model but receives no gradients.
        graph = Graph("coco")
        A = torch.tensor(graph.A, dtype=torch.float32, requires_grad=False)
        self.register_buffer('A', A)

        self.hidden_dim = model_cfg['hidden_dim']
        self.part_img = model_cfg['part_img']
        self.part_ske = model_cfg['part_ske']
        _set_in_channels_img = model_cfg['set_in_channels_img']
        _set_in_channels_ske = model_cfg['set_in_channels_ske']
        _set_channels = model_cfg['set_channels']

        # --- Silhouette branch: conv stages with two 2x2 max-pools, so the
        # spatial size ends at H/4 x W/4.
        self.set_block1 = nn.Sequential(
            BasicConv2d(_set_in_channels_img, _set_channels[0], 5, 1, 2),  # e.g. 1->32 ch, 5x5, same size
            nn.LeakyReLU(inplace=True),
            BasicConv2d(_set_channels[0], _set_channels[0], 3, 1, 1),      # 32->32 ch, 3x3, same size
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))                         # size halved

        self.set_block2 = nn.Sequential(
            BasicConv2d(_set_channels[0], _set_channels[1], 3, 1, 1),      # 32->64 ch
            nn.LeakyReLU(inplace=True),
            BasicConv2d(_set_channels[1], _set_channels[1], 3, 1, 1),      # 64->64 ch
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))                         # size halved again

        self.set_block3 = nn.Sequential(
            BasicConv2d(_set_channels[1], _set_channels[2], 3, 1, 1),      # 64->128 ch
            nn.LeakyReLU(inplace=True),
            BasicConv2d(_set_channels[2], _set_channels[2], 3, 1, 1),      # 128->128 ch
            nn.LeakyReLU(inplace=True))

        # Wrap so each block maps over the set (frame) dimension.
        self.set_block1 = SetBlockWrapper(self.set_block1)
        self.set_block2 = SetBlockWrapper(self.set_block2)
        self.set_block3 = SetBlockWrapper(self.set_block3)

        # --- Skeleton branch: graph-conv transformer layers over the joint
        # graph A; CvT_layer models inter-joint relations via the adjacency.
        self.layer1 = SetBlock(CvT_layer(image_size=(1, 17), in_channels=_set_in_channels_ske, dim=_set_channels[0], heads=1, A=A, depth=1, kernels=1,
                                         strides=1, pad=0), pooling=False)  # 3 -> 32 channels
        self.layer2 = SetBlock(CvT_layer(image_size=(1, 17), in_channels=_set_channels[0], dim=_set_channels[0], heads=2, A=A, depth=2, kernels=1,
                                         strides=1, pad=0), pooling=False)  # 32 -> 32 channels
        self.layer3 = SetBlock(CvT_layer(image_size=(1, 17), in_channels=_set_channels[1], dim=_set_channels[0], heads=4, A=A, depth=2, kernels=1,
                                         strides=1, pad=0), pooling=False)  # per original comment: -> 128 channels

        # --- Part pooling (multi-scale channel mapping).
        # NOTE(review): MCM is not among this file's visible imports — confirm
        # it is brought into scope elsewhere (e.g. a sibling module).
        self.set_pool0 = MCM(self.part_img, _set_channels[2], _set_channels[2])
        self.set_pool1 = MCM(self.part_ske, _set_channels[2], _set_channels[2])
        self.set_pool2 = MCM(self.part_img, _set_channels[2], _set_channels[2])

        # --- Attention: cross-modal (atten) and intra-modal (atten1).
        self.atten = Attention(_set_channels[2])
        self.atten1 = Attention(_set_channels[2])

        # Per-part linear maps into a shared hidden_dim embedding space.
        self.fc_bin = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2] * 2, self.hidden_dim)))
        self.fc_bin1 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2], self.hidden_dim)))
        self.fc_bin2 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_img * 3, _set_channels[2], self.hidden_dim)))
        self.fc_bin3 = nn.Parameter(
            nn.init.xavier_uniform_(
                torch.zeros(self.part_ske * 3, _set_channels[2], self.hidden_dim)))
        # 1x1 conv halving the concatenated channel count (2c -> c).
        self.full = BasicConv2d(_set_channels[2] * 2, _set_channels[2], 1, 1, 0)

    def hp(self, f):
        """Pool the leading dimension of ``f`` by mean + max (set pooling)."""
        return f.mean(0) + f.max(0)[0]

    def ske_hp(self, f, view):
        """Pool skeleton features over their part dim and broadcast the
        result to ``view``'s shape so the two modalities can be added."""
        return self.hp(f).expand(view.size())

    def forward(self, inputs):
        """Run both branches, fuse them, and return training/inference feats.

        ``inputs`` is ``(ipts, labs, _, _, seqL)`` where
          ipts[0][0]: silhouettes [n, s, h, w]  (e.g. [32, 30, 64, 44])
          ipts[1][0]: pose joints [n, s, z, c]  (e.g. [32, 30, 17, 3])
        """
        ipts, labs, _, _, seqL = inputs

        sils = ipts[0][0]  # silhouette sequence [n, s, h, w]
        pose = ipts[1][0]  # skeleton sequence   [n, s, z, c]

        # Insert a channel axis; move pose coordinates into channel position.
        x = sils.unsqueeze(2)                         # [n, s, 1, h, w]
        y = pose.unsqueeze(2).permute(0, 1, 4, 2, 3)  # [n, s, c, 1, z]

        # --- Silhouette branch.
        x_1_s = self.set_block1(x)                             # [n, s, 32, h/2, w/2]
        x_1_s = self.set_block2(x_1_s)                         # [n, s, 64, h/4, w/4]
        x_1_s = self.set_block3(x_1_s).permute(4, 3, 0, 2, 1)  # [w/4, h/4, n, c, s]

        x_1_s = self.hp(x_1_s)       # width pooled away     -> [h/4, n, c, s]
        x_1 = self.set_pool0(x_1_s)  # 3-scale part pooling  -> [3*(h/4), n, c]

        # --- Skeleton branch.
        y_1_s = self.layer1(y)                    # [n, s, 32, 1, z]
        y_1_s = self.layer2(y_1_s)                # [n, s, 64, 1, z] (per original comment)
        y_1_s = self.layer3(y_1_s)                # [n, s, 128, 1, z]
        y_1_s = y_1_s.permute(4, 3, 0, 2, 1)      # [z, 1, n, c, s]
        y_1_s = y_1_s.contiguous().squeeze(1)     # [z, n, c, s]

        y_1 = self.set_pool1(y_1_s)  # 3-scale part pooling -> [3*z, n, c]

        # (Removed leftover debug code that built a fresh, untrained
        # DoubleAttention(128) on every forward, moved it to CUDA
        # unconditionally, printed its outputs, and — critically — overwrote
        # x_1_s / y_1_s with permuted views, corrupting the [p, n, c, s]
        # layout every step below depends on.)

        # --- Part-level fusion of the pooled features (image + skeleton).
        x_2 = torch.cat([x_1, self.atten1(x_1, y_1) + self.ske_hp(y_1, x_1)], 2)  # [3*(h/4), n, 2c]

        p, n, c, s = x_1_s.size()  # image:    [h/4, n, c, s]
        k, n, c, s = y_1_s.size()  # skeleton: [z,   n, c, s]

        # --- Frame-level cross-modal attention, concatenated along channels.
        x_3 = torch.cat([
            x_1_s,
            self.atten(
                x_1_s.permute(0, 1, 3, 2).contiguous().view(p, n * s, c),  # [h/4, n*s, c]
                y_1_s.permute(0, 1, 3, 2).contiguous().view(k, n * s, c)   # [z,   n*s, c]
            ).view(p, n, s, c).permute(0, 1, 3, 2).contiguous()
            + self.ske_hp(y_1_s, x_1_s)], 2)                               # [p, n, 2c, s]
        # 1x1 conv halves the channels, then part pooling.
        x_3 = self.full(x_3.permute(1, 2, 0, 3).contiguous()).permute(2, 0, 1, 3).contiguous()
        x_3 = self.set_pool2(x_3)  # -> [3*(h/4), n, c]

        # --- Per-part linear projections into the shared embedding space.
        x_2 = x_2.matmul(self.fc_bin)   # [3p, n, 2c] x [3p, 2c, d] -> [3p, n, d]
        x_3 = x_3.matmul(self.fc_bin1)  # [3p, n, c]  x [3p, c, d]  -> [3p, n, d]
        x_1 = x_1.matmul(self.fc_bin2)  # [3p, n, c]  x [3p, c, d]  -> [3p, n, d]
        x_4 = y_1.matmul(self.fc_bin3)  # [3z, n, c]  x [3z, c, d]  -> [3z, n, d]

        # Concatenate all part embeddings for the joint loss.
        embed_1 = torch.cat([x_1, x_2, x_3, x_4], 0)     # [3p*3 + 3z, n, d]
        embed_1 = embed_1.permute(1, 0, 2).contiguous()  # [n, parts, d]

        n, s, c, h, w = x.size()

        retval = {
            'training_feat': {
                'triplet': {'embeddings': embed_1, 'labels': labs},
            },
            'visual_summary': {
                'image/sils': x.view(n * s, 1, h, w)
            },
            'inference_feat': {
                'embeddings': embed_1
            }
        }
        return retval



