import torch
from torch import nn
import torch.nn.functional as F
# 导入外部自定义模块：音频引导注意力机制、Transformer编码器/解码器层、双LSTM模型
from .models import New_Audio_Guided_Attention
from .models import EncoderLayer, Encoder, DecoderLayer, Decoder
from torch.nn import MultiheadAttention
from .Dual_lstm import Dual_lstm


class RNNEncoder(nn.Module):
    """Bidirectional-LSTM encoder for the audio and visual streams.

    Each modality is encoded independently. The audio branch uses a hidden
    size of d_model/2 per direction so its bidirectional output is exactly
    d_model wide; the visual branch uses d_model per direction, so its
    output is 2*d_model wide — downstream modules must absorb this asymmetry.
    """

    def __init__(self, audio_dim, video_dim, d_model, num_layers):
        super(RNNEncoder, self).__init__()

        # Target width of the audio branch output.
        self.d_model = d_model

        # Audio BiLSTM: d_model//2 per direction -> d_model after concat.
        # NOTE: LSTM dropout only applies between layers (num_layers > 1);
        # PyTorch warns when it is set on a single-layer LSTM.
        self.audio_rnn = nn.LSTM(audio_dim, int(d_model / 2), num_layers=num_layers, batch_first=True,
                                 bidirectional=True, dropout=0.2)
        # Visual BiLSTM: d_model per direction -> 2*d_model after concat.
        self.visual_rnn = nn.LSTM(video_dim, d_model, num_layers=num_layers, batch_first=True, bidirectional=True,
                                  dropout=0.2)

    def forward(self, audio_feature, visual_feature):
        """Encode both sequences with their respective BiLSTMs.

        Args:
            audio_feature: (batch, seq_len, audio_dim) audio features.
            visual_feature: (batch, seq_len, video_dim) visual features.
        Returns:
            Tuple (audio_out, video_out) of shapes
            (batch, seq_len, d_model) and (batch, seq_len, 2*d_model).
        """
        # Final hidden/cell states are not needed downstream.
        encoded_audio, _ = self.audio_rnn(audio_feature)
        encoded_video, _ = self.visual_rnn(visual_feature)
        return encoded_audio, encoded_video


class InternalTemporalRelationModule(nn.Module):
    """Self-attention over a single modality's timeline.

    Projects the raw features to d_model, then runs a 2-layer Transformer
    encoder to capture intra-modal temporal dependencies.
    """

    def __init__(self, input_dim, d_model, feedforward_dim):
        super(InternalTemporalRelationModule, self).__init__()
        # Two stacked encoder layers, 4 attention heads each.
        self.encoder_layer = EncoderLayer(d_model=d_model, nhead=4, dim_feedforward=feedforward_dim)
        self.encoder = Encoder(self.encoder_layer, num_layers=2)

        # Projection from the raw feature width to the shared d_model.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        # Declared for compatibility; not applied in forward().
        self.relu = nn.ReLU(inplace=True)

    def forward(self, feature):
        """Project and self-encode a (seq_len, batch, input_dim) sequence.

        Returns:
            (seq_len, batch, d_model) temporally contextualised features.
        """
        projected = self.affine_matrix(feature)
        return self.encoder(projected)


class CrossModalRelationAttModule(nn.Module):
    """Cross-modal attention: one modality queries another.

    The query stream is projected to d_model and attends over the memory
    stream through a single Transformer decoder layer.
    """

    def __init__(self, input_dim, d_model, feedforward_dim):
        super(CrossModalRelationAttModule, self).__init__()
        # One decoder layer with 4 heads performs the cross-attention.
        self.decoder_layer = DecoderLayer(d_model=d_model, nhead=4, dim_feedforward=feedforward_dim)
        self.decoder = Decoder(self.decoder_layer, num_layers=1)

        # Maps the query modality into the shared d_model space.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        # Declared for compatibility; not applied in forward().
        self.relu = nn.ReLU(inplace=True)

    def forward(self, query_feature, memory_feature):
        """Fuse memory-modality information into the query stream.

        Args:
            query_feature: (seq_len, batch, input_dim) query modality.
            memory_feature: (seq_len, batch, d_model) encoded memory modality.
        Returns:
            (seq_len, batch, d_model) cross-modally enriched query features.
        """
        projected_query = self.affine_matrix(query_feature)
        return self.decoder(projected_query, memory_feature)


class CAS_Module(nn.Module):
    """Class Activation Sequence (CAS) head.

    A kernel-1 Conv1d scores every time step against num_class event
    classes plus one extra channel (background).
    """

    def __init__(self, d_model, num_class=28):
        super(CAS_Module, self).__init__()
        self.d_model = d_model      # input feature width
        self.num_class = num_class  # event classes (excluding background)
        # Declared for compatibility; not applied in forward().
        self.dropout = nn.Dropout(0.2)

        # Pointwise temporal convolution: d_model -> num_class + 1 logits.
        self.classifier = nn.Sequential(
            nn.Conv1d(in_channels=d_model, out_channels=self.num_class+1, kernel_size=1, stride=1, padding=0, bias=False)
        )

    def forward(self, content):
        """Score (batch, seq_len, d_model) features per class and time step.

        Returns:
            (batch, seq_len, num_class + 1) class activation sequence.
        """
        # Conv1d expects channels-first: (batch, d_model, seq_len).
        channels_first = content.permute(0, 2, 1)
        scores = self.classifier(channels_first)
        # Back to (batch, seq_len, num_class + 1).
        return scores.permute(0, 2, 1)


class SupvLocalizeModule(nn.Module):
    """Fully-supervised localization head.

    Emits a per-time-step event confidence plus a clip-level class score
    computed from temporally max-pooled features.
    """

    def __init__(self, d_model):
        super(SupvLocalizeModule, self).__init__()
        # Declared for compatibility; not applied in forward().
        self.relu = nn.ReLU(inplace=True)
        # Scalar per-step confidence (is this step part of an event?).
        self.classifier = nn.Linear(d_model, 1)
        # Clip-level classifier over 28 event categories.
        self.event_classifier = nn.Linear(d_model, 28)

    def forward(self, fused_content):
        """Localize and classify from (seq_len, batch, d_model) features.

        Returns:
            logits: (seq_len, batch, 1) per-step event confidence.
            class_scores: (batch, 28) clip-level class logits (no softmax).
        """
        # Temporal max-pool -> one global descriptor per clip.
        pooled, _ = fused_content.transpose(1, 0).max(1)
        logits = self.classifier(fused_content)
        class_scores = self.event_classifier(pooled)
        return logits, class_scores


class WeaklyLocalizationModule(nn.Module):
    """Weakly-supervised localization head.

    With no frame-level labels available, per-step confidences gate the
    clip-level class logits before a temporal max reduces them to one
    clip-level prediction.
    """

    def __init__(self, input_dim):
        super(WeaklyLocalizationModule, self).__init__()
        self.hidden_dim = input_dim  # must match the fused feature width
        # Scalar per-step event confidence.
        self.classifier = nn.Linear(self.hidden_dim, 1)
        # 29-way classifier (28 events + background).
        self.event_classifier = nn.Linear(self.hidden_dim, 29)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, fused_content):
        """Predict from (seq_len, batch, hidden_dim) fused features.

        Returns:
            is_event_scores: (batch, seq_len) per-step confidences (squeezed).
            raw_logits: (batch, 29) un-normalised class logits (squeezed).
            event_scores: (batch, 29) softmax class probabilities.
        """
        # Work batch-first: (batch, seq_len, hidden_dim).
        batch_first = fused_content.transpose(0, 1)
        pooled, _ = batch_first.max(1)  # temporal max-pool -> clip descriptor
        is_event_scores = self.classifier(batch_first)  # (batch, seq, 1)
        raw_logits = self.event_classifier(pooled)[:, None, :]  # (batch, 1, 29)
        # Gate class logits by per-step confidence (broadcasts to
        # (batch, seq, 29)), then temporal max for the weak label.
        gated = is_event_scores.sigmoid() * raw_logits
        clip_logits, _ = torch.max(gated, dim=1)
        event_scores = self.softmax(clip_logits)
        return is_event_scores.squeeze(), raw_logits.squeeze(), event_scores


class AudioVideoInter(nn.Module):
    """Audio-visual interaction via multi-head attention.

    The element-wise product of the two streams serves as the query over a
    memory built by concatenating both modalities along the time axis.
    """

    def __init__(self, d_model, n_head, head_dropout=0.1):
        super(AudioVideoInter, self).__init__()
        self.dropout = nn.Dropout(0.1)
        # Attention over the joint audio+video memory.
        self.video_multihead = MultiheadAttention(d_model, num_heads=n_head, dropout=head_dropout)
        self.norm1 = nn.LayerNorm(d_model)  # stabilises the residual sum

    def forward(self, video_feat, audio_feat):
        """Fuse (seq_len, batch, d_model) video and audio features.

        Returns:
            (seq_len, batch, d_model) interaction-fused features.
        """
        # Hadamard product as a cheap joint representation / query.
        joint_query = video_feat * audio_feat
        # Memory spans both modalities (length doubles along dim 0).
        joint_memory = torch.cat([audio_feat, video_feat], dim=0)
        # Attend from the joint query into the concatenated memory.
        attended = self.video_multihead(joint_query, joint_memory, joint_memory)[0]
        # Residual + dropout + layer norm.
        return self.norm1(joint_query + self.dropout(attended))


class weak_main_model(nn.Module):
    """Weakly-supervised main model for audio-visual event localization.

    Pipeline: visual preprocessing -> audio-guided spatial-channel attention
    -> bidirectional cross-modal attention -> modality gating -> per-modality
    class activations -> CAS head -> top-k temporal pooling into clip-level
    class scores.
    """
    def __init__(self, config):
        super(weak_main_model, self).__init__()
        self.config = config  # model configuration dict
        self.beta = self.config["beta"]    # audio-guided attention weight
        self.alpha = self.config["alpha"]  # modality-gating mixing strength
        self.gamma = self.config["gamma"]  # CAS gating weight

        # Audio-guided spatial-channel attention over visual features.
        # NOTE(review): .cuda() hard-codes GPU placement — breaks CPU-only runs.
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()
        # Input feature dimensions.
        self.video_input_dim = self.config["video_inputdim"]
        self.video_fc_dim = self.config["video_inputdim"]  # adapter keeps input width
        self.d_model = self.config["d_model"]
        self.audio_input_dim = self.config["audio_inputdim"]

        # Visual adapter: linear map (same width in and out here).
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)

        # Intra-modal temporal encoders (Transformer encoders).
        self.video_encoder = InternalTemporalRelationModule(input_dim=self.video_fc_dim, d_model=self.d_model, feedforward_dim=2048)
        self.audio_encoder = InternalTemporalRelationModule(input_dim=self.audio_input_dim, d_model=self.d_model, feedforward_dim=2048)
        # Cross-modal decoders (attention-based fusion).
        self.video_decoder = CrossModalRelationAttModule(input_dim=self.video_fc_dim, d_model=self.d_model, feedforward_dim=1024)
        self.audio_decoder = CrossModalRelationAttModule(input_dim=self.audio_input_dim, d_model=self.d_model, feedforward_dim=1024)

        # Deep audio-video interaction modules (one per direction).
        self.AVInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)
        self.VAInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)

        # Modality gates: scalar weight in (0, 1) per time step.
        self.audio_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        self.video_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )

        # Class activation sequence head (28 events + 1 extra channel).
        self.CAS_model = CAS_Module(d_model=self.d_model, num_class=28)
        # Auxiliary heads (self.classifier is not used in forward()).
        self.classifier = nn.Linear(self.d_model, 1)
        self.softmax = nn.Softmax(dim=-1)
        self.audio_cas = nn.Linear(self.d_model, 29)  # audio class activation head
        self.video_cas = nn.Linear(self.d_model, 29)  # video class activation head

    def forward(self, visual_feature, audio_feature):
        """Run weakly-supervised event localization and classification.

        Args:
            visual_feature: (batch, seq_len, ...) visual features — the exact
                spatial layout depends on New_Audio_Guided_Attention; TODO confirm.
            audio_feature: (batch, seq_len, audio_input_dim) audio features.
        Returns:
            av_gate (squeezed): combined modality gate, (batch, seq_len).
            raw_logits (squeezed): top-k averaged CAS logits, (batch, 29).
            event_scores: softmax-normalised class scores, (batch, 29).
        """
        # To (seq_len, batch, dim) for the Transformer-style modules.
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        # Visual preprocessing: linear + ReLU + dropout.
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))

        # Audio-guided enhancement of the visual stream.
        # NOTE(review): audio is already (seq, batch, dim) here while visual is
        # still batch-first — assumes the attention module expects this mix; verify.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_feature = visual_feature.transpose(1, 0).contiguous()  # to (seq, batch, dim)

        # Cross-modal fusion 1: audio queries the encoded video memory.
        video_key_value_feature = self.video_encoder(visual_feature)
        audio_query_output = self.audio_decoder(audio_feature, video_key_value_feature)

        # Cross-modal fusion 2: video queries the encoded audio memory.
        audio_key_value_feature = self.audio_encoder(audio_feature)
        video_query_output = self.video_decoder(visual_feature, audio_key_value_feature)

        # Modality gate weights.
        # NOTE(review): audio_gate is computed from VIDEO features and
        # video_gate from AUDIO features — confirm this crossing is intended.
        audio_gate = self.audio_gated(video_key_value_feature)
        video_gate = self.video_gated(audio_key_value_feature)
        av_gate = (audio_gate + video_gate) / 2  # combined gate
        av_gate = av_gate.permute(1, 0, 2)  # to (batch, seq, 1)

        # Convex blend of each modality with its gated version (alpha mixes).
        video_query_output = (1 - self.alpha)*video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = (1 - self.alpha)*audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality 29-way class activations.
        video_cas = self.video_cas(video_query_output)
        audio_cas = self.audio_cas(audio_query_output)
        video_cas = video_cas.permute(1, 0, 2)  # to (batch, seq, 29)
        audio_cas = audio_cas.permute(1, 0, 2)
        # Sigmoid turns activations into (0, 1) gate weights.
        video_cas_gate = video_cas.sigmoid()
        audio_cas_gate = audio_cas.sigmoid()

        # Deep audio-video interaction (both directions).
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)

        # Average-fuse the two interaction outputs.
        fused_content = (video_query_output + audio_query_output)/2
        fused_content = fused_content.transpose(0, 1)  # to (batch, seq, d_model) for CAS

        # CAS scores gated by both modality CAS gates.
        # NOTE(review): both terms share cas_score, so this equals
        # gamma * (video_cas_gate + audio_cas_gate) * cas_score — confirm intended.
        cas_score = self.CAS_model(fused_content)
        cas_score = self.gamma*video_cas_gate*cas_score + self.gamma*audio_cas_gate*cas_score

        # Weak supervision: average the top-4 time steps per class.
        sorted_scores, _ = cas_score.sort(descending=True, dim=1)
        topk_scores = sorted_scores[:, :4, :]  # top-4 high-confidence steps
        raw_logits = torch.mean(topk_scores, dim=1)[:, None, :]  # (batch, 1, 29)
        # Gate-weight the clip logits, then reduce over time.
        fused_logits = av_gate * raw_logits
        logits, _ = torch.max(fused_logits, dim=1)
        event_scores = self.softmax(logits)  # normalise class scores

        return av_gate.squeeze(), raw_logits.squeeze(), event_scores


class supv_main_model(nn.Module):
    """Fully-supervised main model for audio-visual event localization.

    Pipeline: visual preprocessing -> audio-guided attention -> BiLSTM
    encoding -> bidirectional cross-modal attention -> modality gating ->
    supervised localization head blended with top-k per-modality class scores.
    """
    def __init__(self, config):
        super(supv_main_model, self).__init__()
        self.config = config  # model configuration dict
        self.beta = self.config["beta"]  # audio-guided attention weight
        # Audio-guided spatial-channel attention over visual features.
        # NOTE(review): .cuda() hard-codes GPU placement — breaks CPU-only runs.
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()

        # Input feature dimensions.
        self.video_input_dim = self.config['video_inputdim']
        self.audio_input_dim = self.config['audio_inputdim']
        self.video_fc_dim = 512  # visual adapter output width
        self.d_model = self.config['d_model']  # shared model width

        # Visual adapter layer.
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)

        # Intra-modal temporal encoders.
        # NOTE(review): the video encoder/decoder declare input_dim=video_input_dim
        # but are fed the visual BiLSTM output (2*d_model wide per RNNEncoder) —
        # dims only line up if video_input_dim == 2*d_model; verify against config.
        self.video_encoder = InternalTemporalRelationModule(input_dim=self.video_input_dim, d_model=self.d_model, feedforward_dim=1024)
        self.audio_encoder = InternalTemporalRelationModule(input_dim=self.d_model, d_model=self.d_model, feedforward_dim=1024)
        # Cross-modal decoders.
        self.video_decoder = CrossModalRelationAttModule(input_dim=self.video_input_dim, d_model=self.d_model, feedforward_dim=1024)
        self.audio_decoder = CrossModalRelationAttModule(input_dim=self.d_model, d_model=self.d_model, feedforward_dim=1024)

        # BiLSTM encoder for both modalities (auxiliary feature extraction).
        self.audio_visual_rnn_layer = RNNEncoder(audio_dim=self.audio_input_dim, video_dim=self.video_input_dim, d_model=self.d_model, num_layers=1)

        # Modality gates: scalar weight in (0, 1) per time step.
        self.audio_gated = nn.Sequential(
                        nn.Linear(self.d_model, 1),
                        nn.Sigmoid()
                    )
        self.video_gated = nn.Sequential(
                        nn.Linear(self.d_model, 1),
                        nn.Sigmoid()
                    )

        # Bidirectional audio-video interaction modules.
        self.AVInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)
        self.VAInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)

        # Supervised localization head.
        self.localize_module = SupvLocalizeModule(self.d_model)
        # Layer norms (declared; not applied in forward()).
        self.video_norm = nn.LayerNorm(self.d_model)
        self.audio_norm = nn.LayerNorm(self.d_model)
        # Per-modality class activation heads (28 classes, no background).
        self.audio_cas = nn.Linear(self.d_model, 28)
        self.video_cas = nn.Linear(self.d_model, 28)

        self.alpha = self.config['alpha']  # modality-gating strength
        self.gamma = self.config['gamma']  # blend weight for av_score

    def forward(self, visual_feature, audio_feature):
        """Run supervised event localization and classification.

        Args:
            visual_feature: (batch, seq_len, ...) visual features — exact
                spatial layout depends on New_Audio_Guided_Attention; TODO confirm.
            audio_feature: (batch, seq_len, audio_input_dim) audio features.
        Returns:
            is_event_scores: (seq_len, batch, 1) per-step event confidence.
            event_scores: (batch, 28) class scores blended with av_score.
            audio_visual_gate: product of the two modality gates.
            av_score: (batch, 28) top-k averaged per-modality class scores.
        """
        # Keep batch-first audio for the RNN branch.
        audio_rnn_input = audio_feature
        # To (seq_len, batch, dim) for the attention modules.
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        # Visual preprocessing: linear + ReLU + dropout.
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))

        # Audio-guided enhancement of the visual stream.
        # NOTE(review): audio is (seq, batch, dim) here while visual is
        # batch-first — assumes the attention module expects this mix; verify.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_rnn_input = visual_feature  # enhanced visual features for the RNN

        # BiLSTM encoding of both modalities (batch-first in/out).
        audio_rnn_output1, visual_rnn_output1 = self.audio_visual_rnn_layer(audio_rnn_input, visual_rnn_input)
        # To (seq_len, batch, dim) for the Transformer modules.
        audio_encoder_input1 = audio_rnn_output1.transpose(1, 0).contiguous()
        visual_encoder_input1 = visual_rnn_output1.transpose(1, 0).contiguous()

        # Cross-modal fusion 1: audio queries the encoded video memory.
        video_key_value_feature = self.video_encoder(visual_encoder_input1)
        audio_query_output = self.audio_decoder(audio_encoder_input1, video_key_value_feature)

        # Cross-modal fusion 2: video queries the encoded audio memory.
        audio_key_value_feature = self.audio_encoder(audio_encoder_input1)
        video_query_output = self.video_decoder(visual_encoder_input1, audio_key_value_feature)

        # Modality gate weights (each gate from its own modality here).
        audio_gate = self.audio_gated(audio_key_value_feature)
        video_gate = self.video_gated(video_key_value_feature)
        audio_visual_gate = audio_gate * video_gate  # joint gate

        # Residual gating: each modality boosted by the other's gate.
        video_query_output = video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality class scores via top-4 temporal averaging.
        video_cas = self.video_cas(video_query_output)
        audio_cas = self.audio_cas(audio_query_output)
        video_cas = video_cas.permute(1, 0, 2)  # to (batch, seq, 28)
        audio_cas = audio_cas.permute(1, 0, 2)
        sorted_scores_video, _ = video_cas.sort(descending=True, dim=1)
        topk_scores_video = sorted_scores_video[:, :4, :]
        score_video = torch.mean(topk_scores_video, dim=1)
        sorted_scores_audio, _ = audio_cas.sort(descending=True, dim=1)
        topk_scores_audio = sorted_scores_audio[:, :4, :]
        score_audio = torch.mean(topk_scores_audio, dim=1)
        av_score = (score_video + score_audio) / 2  # fused modality class score

        # Deep audio-video interaction (both directions).
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)

        # Supervised localization over the averaged fused features.
        is_event_scores, event_scores = self.localize_module((video_query_output + audio_query_output)/2)
        event_scores = event_scores + self.gamma*av_score  # blend in av_score

        return is_event_scores, event_scores, audio_visual_gate, av_score