# Core model definitions: weakly-supervised and fully-supervised variants

# Basic imports
import torch
from torch import nn
import torch.nn.functional as F
# Audio-guided spatial-channel attention module
from .models import New_Audio_Guided_Attention
# Transformer components (encoder layer, encoder, decoder layer, decoder)
from .models import EncoderLayer, Encoder, DecoderLayer, Decoder
# Multi-head attention module
from torch.nn import MultiheadAttention
# Bi-directional LSTM module
from .Dual_lstm import Dual_lstm


class RNNEncoder(nn.Module):
    """Bi-directional LSTM encoder for audio and visual feature streams.

    Each modality is modelled independently to capture local temporal
    dependencies before any cross-modal fusion takes place.
    """

    def __init__(self, audio_dim, video_dim, d_model, num_layers):
        super(RNNEncoder, self).__init__()

        self.d_model = d_model  # target feature width of the audio branch

        # Audio branch: per-direction hidden size is d_model/2, so the
        # concatenated forward+backward output has dimension d_model.
        self.audio_rnn = nn.LSTM(
            audio_dim,
            int(d_model / 2),
            num_layers=num_layers,
            batch_first=True,       # inputs arrive as [batch, seq_len, dim]
            bidirectional=True,
            dropout=0.2,            # inter-layer dropout (no-op when num_layers == 1)
        )

        # Visual branch: per-direction hidden size is d_model, so the
        # concatenated output has dimension 2 * d_model.
        self.visual_rnn = nn.LSTM(
            video_dim,
            d_model,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
            dropout=0.2,
        )

    def forward(self, audio_feature, visual_feature):
        """Encode both modalities with their respective bi-LSTMs.

        Args:
            audio_feature: audio features, shape [batch, seq_len, audio_dim]
            visual_feature: visual features, shape [batch, seq_len, video_dim]
        Returns:
            audio encoding of shape [batch, seq_len, d_model] and
            visual encoding of shape [batch, seq_len, 2 * d_model]
        """
        encoded_audio, _ = self.audio_rnn(audio_feature)
        encoded_video, _ = self.visual_rnn(visual_feature)
        return encoded_audio, encoded_video


class InternalTemporalRelationModule(nn.Module):
    """Intra-modal temporal relation module.

    A small Transformer encoder stack that models long-range temporal
    dependencies within a single modality (audio or video).
    """
    def __init__(self, input_dim, d_model, feedforward_dim):
        super(InternalTemporalRelationModule, self).__init__()

        # One self-attention encoder layer (4 heads + feed-forward network),
        # stacked twice in the encoder below.
        self.encoder_layer = EncoderLayer(
            d_model=d_model,
            nhead=4,
            dim_feedforward=feedforward_dim,
        )
        self.encoder = Encoder(self.encoder_layer, num_layers=2)

        # Project raw features to the Transformer width d_model.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        self.relu = nn.ReLU(inplace=True)  # kept for module parity; not used in forward

    def forward(self, feature):
        """Enhance a single modality with temporal self-attention.

        Args:
            feature: modality features, shape [seq_len, batch, input_dim]
                     (time-major layout expected by the Transformer)
        Returns:
            encoded features, shape [seq_len, batch, d_model]
        """
        projected = self.affine_matrix(feature)
        return self.encoder(projected)


class CrossModalRelationAttModule(nn.Module):
    """Cross-modal relation module.

    A Transformer decoder in which one modality (the query) attends to the
    other modality (the memory), e.g. audio attending to salient video cues.
    """
    def __init__(self, input_dim, d_model, feedforward_dim):
        super(CrossModalRelationAttModule, self).__init__()

        # Cross-attention decoder layer (4 heads + feed-forward network).
        self.decoder_layer = DecoderLayer(
            d_model=d_model,
            nhead=4,
            dim_feedforward=feedforward_dim,
        )
        # A single layer focuses purely on the cross-modal interaction.
        self.decoder = Decoder(self.decoder_layer, num_layers=1)

        # Project the query features to the Transformer width d_model.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        self.relu = nn.ReLU(inplace=True)  # kept for module parity; not used in forward

    def forward(self, query_feature, memory_feature):
        """Let the query modality attend to the memory modality.

        Args:
            query_feature: query-modality features (e.g. audio),
                shape [seq_len, batch, input_dim]
            memory_feature: memory-modality features (already encoded),
                shape [seq_len, batch, d_model]
        Returns:
            cross-modal query features, shape [seq_len, batch, d_model]
        """
        projected_query = self.affine_matrix(query_feature)
        return self.decoder(projected_query, memory_feature)


class CAS_Module(nn.Module):
    """Class Activation Sequence (CAS) head.

    Produces per-timestep class responses used to localise where an event
    is active along the temporal axis.
    """
    def __init__(self, d_model, num_class=28):
        super(CAS_Module, self).__init__()
        self.d_model = d_model          # input feature width
        self.num_class = num_class      # number of event classes (default 28)
        self.dropout = nn.Dropout(0.2)  # kept for module parity; not applied in forward

        # 1x1 temporal convolution mapping features to per-class scores;
        # the extra output channel accounts for background.
        self.classifier = nn.Sequential(
            nn.Conv1d(
                in_channels=d_model,
                out_channels=self.num_class + 1,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            )
        )

    def forward(self, content):
        """Compute temporal class activation scores.

        Args:
            content: fused features, shape [batch, seq_len, d_model]
        Returns:
            temporal class scores, shape [batch, seq_len, num_class + 1]
        """
        # Conv1d wants channels-first: [batch, d_model, seq_len].
        channel_first = content.permute(0, 2, 1)
        scores = self.classifier(channel_first)
        # Back to time-major layout: [batch, seq_len, num_class + 1].
        return scores.permute(0, 2, 1)


class SupvLocalizeModule(nn.Module):
    """Fully-supervised localisation head.

    Given strongly-labelled data (event boundaries and category known),
    predicts per-timestep event scores and a clip-level class score.
    """
    def __init__(self, d_model):
        super(SupvLocalizeModule, self).__init__()
        self.relu = nn.ReLU(inplace=True)  # kept for module parity; not used in forward

        # Per-timestep event score (one logit per time step).
        self.classifier = nn.Linear(d_model, 1)
        # Clip-level event category scores (28 classes).
        self.event_classifier = nn.Linear(d_model, 28)

    def forward(self, fused_content):
        """Produce localisation logits and class scores.

        Args:
            fused_content: cross-modal fused features,
                shape [seq_len, batch, d_model]
        Returns:
            logits: per-timestep event logits, shape [seq_len, batch, 1]
            class_scores: event class scores, shape [batch, 28]
        """
        # Global max-pool over time to summarise the clip for classification.
        batch_major = fused_content.transpose(1, 0)   # [batch, seq_len, d_model]
        pooled, _ = batch_major.max(1)                # [batch, d_model]

        logits = self.classifier(fused_content)       # [seq_len, batch, 1]
        class_scores = self.event_classifier(pooled)  # [batch, 28]

        return logits, class_scores


class WeaklyLocalizationModule(nn.Module):
    """Weakly-supervised classification head.

    With only video-level labels (event presence unknown in time), fuses a
    per-timestep presence score with clip-level class scores.
    """
    def __init__(self, input_dim):
        super(WeaklyLocalizationModule, self).__init__()

        self.hidden_dim = input_dim  # must match the fused feature width
        # Per-timestep event-presence score.
        self.classifier = nn.Linear(self.hidden_dim, 1)
        # 29-way class scores (28 event classes + 1 background).
        self.event_classifier = nn.Linear(self.hidden_dim, 29)
        # Normalises class scores to probabilities.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, fused_content):
        """Fuse presence and class scores into final event probabilities.

        Args:
            fused_content: fused features, shape [seq_len, batch, hidden_dim]
        Returns:
            is_event_scores: presence scores, shape [batch, seq_len]
            raw_logits: raw class scores, shape [batch, 29]
            event_scores: event class probabilities, shape [batch, 29]
        """
        batch_major = fused_content.transpose(0, 1)               # [batch, seq_len, hidden_dim]
        # Global max-pool over time for clip-level classification.
        pooled, _ = batch_major.max(1)                            # [batch, hidden_dim]

        is_event_scores = self.classifier(batch_major)            # [batch, seq_len, 1]
        raw_logits = self.event_classifier(pooled)[:, None, :]    # [batch, 1, 29]

        # Weight class scores by sigmoid presence probability per timestep.
        fused_logits = is_event_scores.sigmoid() * raw_logits     # [batch, seq_len, 29]
        # Weak supervision: keep only the strongest response over time.
        logits = fused_logits.max(dim=1)[0]                       # [batch, 29]
        event_scores = self.softmax(logits)

        return is_event_scores.squeeze(), raw_logits.squeeze(), event_scores


class AudioVideoInter(nn.Module):
    """Audio-visual interaction block.

    Multi-head attention fuses audio and video features, letting a joint
    query attend over both modalities to highlight key interactions.
    """
    def __init__(self, d_model, n_head, head_dropout=0.1):
        super(AudioVideoInter, self).__init__()

        self.dropout = nn.Dropout(0.1)  # dropout on the attention output
        # Cross-modal multi-head attention layer.
        self.video_multihead = MultiheadAttention(
            d_model,
            num_heads=n_head,
            dropout=head_dropout,
        )
        self.norm1 = nn.LayerNorm(d_model)  # stabilises training

    def forward(self, video_feat, audio_feat):
        """Fuse the two modalities with cross-attention.

        Args:
            video_feat: video features, shape [seq_len, batch, d_model]
            audio_feat: audio features, shape [seq_len, batch, d_model]
        Returns:
            interaction-enhanced features, shape [seq_len, batch, d_model]
        """
        # Element-wise product as a coarse joint query representation.
        joint_query = video_feat * audio_feat                  # [seq_len, batch, d_model]
        # Memory bank: both modalities stacked along the time axis.
        memory = torch.cat([audio_feat, video_feat], dim=0)    # [2*seq_len, batch, d_model]
        # Attend the joint query over the combined memory.
        attended, _ = self.video_multihead(joint_query, memory, memory)
        # Residual connection + layer norm keeps the original signal.
        return self.norm1(joint_query + self.dropout(attended))


class weak_main_model(nn.Module):
    """
    Weakly-supervised main model.
    Performs event classification from weak (video-level) labels via
    cross-modal fusion and gating between audio and video streams.
    """
    def __init__(self, config):
        super(weak_main_model, self).__init__()
        self.config = config                # configuration dict (hyper-parameters)
        self.beta = self.config["beta"]     # attention-module hyper-parameter
        self.alpha = self.config["alpha"]   # gating weight coefficient
        self.gamma = self.config["gamma"]   # class-score fusion coefficient

        # Audio-guided spatial-channel attention (highlights video content
        # related to the audio).
        # NOTE(review): .cuda() hard-codes GPU placement; consider .to(device).
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()
        
        # Feature dimension configuration.
        self.video_input_dim = self.config["video_inputdim"]    # raw video feature dim
        self.video_fc_dim = self.config["video_inputdim"]       # video FC output dim (same as input)
        self.d_model = self.config["d_model"]                   # model feature dim
        self.audio_input_dim = self.config["audio_inputdim"]    # raw audio feature dim
        
        # Video feature adapter (linear projection + non-linearity).
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)  # regularization

        # Intra-modal encoder / cross-modal decoder for the video stream.
        self.video_encoder = InternalTemporalRelationModule(
            input_dim=self.video_fc_dim,
            d_model=self.d_model,
            feedforward_dim=2048
        )
        self.video_decoder = CrossModalRelationAttModule(
            input_dim=self.video_fc_dim,
            d_model=self.d_model,
            feedforward_dim=1024
        )

        # Intra-modal encoder / cross-modal decoder for the audio stream.
        self.audio_encoder = InternalTemporalRelationModule(
            input_dim=self.audio_input_dim,
            d_model=self.d_model,
            feedforward_dim=2048
        )
        self.audio_decoder = CrossModalRelationAttModule(
            input_dim=self.audio_input_dim, 
            d_model=self.d_model, 
            feedforward_dim=1024
        )

        # Bidirectional cross-modal interaction blocks.
        self.AVInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)    # audio -> video
        self.VAInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)    # video -> audio
        
        # Gating networks (dynamically weight each modality's contribution).
        self.audio_gated = nn.Sequential(
            nn.Linear(self.d_model, 1), # map features to a scalar weight
            nn.Sigmoid()                # squash weight into [0, 1]
        )
        self.video_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        
        # Class activation sequence head and auxiliary classifiers.
        self.CAS_model = CAS_Module(d_model=self.d_model, num_class=28)
        self.classifier = nn.Linear(self.d_model, 1)    # event-presence classifier
        self.softmax = nn.Softmax(dim=-1)               # class-probability normalisation
        
        # Per-modality class score projections (28 classes + background).
        self.audio_cas = nn.Linear(self.d_model, 29)
        self.video_cas = nn.Linear(self.d_model, 29)

    def forward(self, visual_feature, audio_feature):
        """
        Weakly-supervised event classification.
        Args:
            visual_feature: video features, shape [batch, seq_len, video_input_dim]
            audio_feature: audio features, shape [batch, seq_len, audio_input_dim]
        Returns:
            av_gate: fused audio-video gate weights, shape [batch, seq_len]
            raw_logits: raw class scores, shape [batch, 29]
            event_scores: final event class probabilities, shape [batch, 29]
        """
        # Reorder audio to time-major layout for the Transformer:
        # [batch, seq_len, dim] -> [seq_len, batch, dim].
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        
        # Video pre-processing: linear projection + ReLU + dropout.
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))

        # Spatial-channel attention: audio guides which video regions matter.
        # NOTE(review): audio_feature is already time-major here while
        # visual_feature is batch-major — confirm the attention module
        # expects this layout combination.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_feature = visual_feature.transpose(1, 0).contiguous()

        # Audio query: decode audio against the encoded video (audio attends
        # to video).
        video_key_value_feature = self.video_encoder(visual_feature)                        # video intra-modal encoding
        audio_query_output = self.audio_decoder(audio_feature, video_key_value_feature)     # audio->video cross-modal decoding

        # Video query: decode video against the encoded audio (video attends
        # to audio).
        audio_key_value_feature = self.audio_encoder(audio_feature)                         # audio intra-modal encoding
        video_query_output = self.video_decoder(visual_feature, audio_key_value_feature)    # video->audio cross-modal decoding

        # Gate weights (dynamically balance the two modalities).
        # NOTE(review): here the audio gate is driven by the VIDEO encoding
        # and vice versa; supv_main_model does the opposite — confirm which
        # pairing is intended.
        audio_gate = self.audio_gated(video_key_value_feature)  # audio weight from video encoding
        video_gate = self.video_gated(audio_key_value_feature)  # video weight from audio encoding
        av_gate = (audio_gate + video_gate) / 2                 # averaged gate
        av_gate = av_gate.permute(1, 0, 2)                      # [seq_len, batch, 1] -> [batch, seq_len, 1]

        # Gated residual mixing, with alpha controlling the gated fraction.
        video_query_output = (1 - self.alpha)*video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = (1 - self.alpha)*audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality class score projections.
        video_cas = self.video_cas(video_query_output)  # [seq_len, batch, 29]
        audio_cas = self.audio_cas(audio_query_output)  # [seq_len, batch, 29]
        # [seq_len, batch, 29] -> [batch, seq_len, 29]
        video_cas = video_cas.permute(1, 0, 2)
        audio_cas = audio_cas.permute(1, 0, 2)

        # Sigmoid class gates derived from each modality's scores.
        video_cas_gate = video_cas.sigmoid()
        audio_cas_gate = audio_cas.sigmoid()

        # Cross-modal interaction: deeper fusion of the two streams.
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)
        
        # Fuse by averaging the two streams.
        fused_content = (video_query_output+audio_query_output)/2
        # [seq_len, batch, d_model] -> [batch, seq_len, d_model]
        fused_content = fused_content.transpose(0, 1)

        # Class activation sequence scores.
        cas_score = self.CAS_model(fused_content)  # [batch, seq_len, 29]
        # Amplify scores with both class gates, each scaled by gamma.
        cas_score = self.gamma*video_cas_gate*cas_score + self.gamma*audio_cas_gate*cas_score

        # Top-K (K=4) mean over time as the clip-level class score.
        sorted_scores, _ = cas_score.sort(descending=True, dim=1)   # sort along the time axis
        topk_scores = sorted_scores[:, :4, :]                       # 4 strongest responses per class
        raw_logits = torch.mean(topk_scores, dim=1)[:, None, :]     # mean -> raw class scores

        # Weight class scores by the per-timestep gate (broadcasts to
        # [batch, seq_len, 29]).
        fused_logits = av_gate * raw_logits         # [batch, seq_len, 29]
        # Weak supervision: keep only the strongest response over time.
        logits, _ = torch.max(fused_logits, dim=1)  # [batch, 29]
        # Normalise to class probabilities.
        event_scores = self.softmax(logits)

        return av_gate.squeeze(), raw_logits.squeeze(), event_scores


class supv_main_model(nn.Module):
    """
    Fully-supervised main model.
    With strong labels (event boundaries and categories), performs joint
    temporal localisation and classification of audio-visual events.
    """
    def __init__(self, config):
        super(supv_main_model, self).__init__()
        self.config = config            # configuration dict
        self.beta = self.config["beta"] # attention hyper-parameter
        # Audio-guided spatial-channel attention.
        # NOTE(review): .cuda() hard-codes GPU placement; consider .to(device).
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()

        # Feature dimension configuration.
        self.video_input_dim = self.config['video_inputdim']
        self.audio_input_dim = self.config['audio_inputdim']
        self.video_fc_dim = 512                 # video FC output dim
        self.d_model = self.config['d_model']   # model feature dim

        # Video feature adapter.
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)

        # Intra-modal encoder / cross-modal decoder for the video stream.
        # NOTE(review): the video encoder/decoder receive the bi-LSTM visual
        # output, which has width 2*d_model — this assumes
        # 2*d_model == video_input_dim (e.g. d_model=256, video dim=512);
        # confirm against the config.
        self.video_encoder = InternalTemporalRelationModule(
            input_dim=self.video_input_dim,
            d_model=self.d_model,
            feedforward_dim=1024
        )
        self.video_decoder = CrossModalRelationAttModule(
            input_dim=self.video_input_dim,
            d_model=self.d_model,
            feedforward_dim=1024
        )
        
        # Intra-modal encoder / cross-modal decoder for the audio stream
        # (the bi-LSTM audio output has width d_model).
        self.audio_encoder = InternalTemporalRelationModule(
            input_dim=self.d_model,
            d_model=self.d_model,
            feedforward_dim=1024
        )
        self.audio_decoder = CrossModalRelationAttModule(
            input_dim=self.d_model,
            d_model=self.d_model,
            feedforward_dim=1024
        )
        
        # Audio-visual RNN encoder (initial temporal modelling).
        self.audio_visual_rnn_layer = RNNEncoder(
            audio_dim=self.audio_input_dim,
            video_dim=self.video_input_dim,
            d_model=self.d_model,
            num_layers=1
        )
        
        # Gating networks (dynamically weight each modality's contribution).
        self.audio_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        self.video_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        
        # Bidirectional cross-modal interaction blocks.
        self.AVInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)
        self.VAInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)
        
        # Supervised localisation and classification head.
        self.localize_module = SupvLocalizeModule(self.d_model)
        
        # Layer norms (declared but not applied in forward).
        self.video_norm = nn.LayerNorm(self.d_model)
        self.audio_norm = nn.LayerNorm(self.d_model)
        
        # Per-modality class score projections (28 classes).
        self.audio_cas = nn.Linear(self.d_model, 28)
        self.video_cas = nn.Linear(self.d_model, 28)

        self.alpha = self.config['alpha']   # gating weight coefficient
        self.gamma = self.config['gamma']   # score-fusion coefficient


    def forward(self, visual_feature, audio_feature):
        """
        Supervised event localisation and classification.
        Args:
            visual_feature: video features, shape [batch, seq_len, video_input_dim]
            audio_feature: audio features, shape [batch, seq_len, audio_input_dim]
        Returns:
            is_event_scores: per-timestep event logits, shape [seq_len, batch, 1]
            event_scores: event class scores, shape [batch, 28]
            audio_visual_gate: joint audio-video gate, shape [seq_len, batch, 1]
            av_score: fused audio-video class scores, shape [batch, 28]
        """
        # Keep the raw (batch-major) audio for the RNN.
        audio_rnn_input = audio_feature
        # Time-major audio for the attention module:
        # [batch, seq_len, dim] -> [seq_len, batch, dim].
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        
        # Video pre-processing: linear projection + ReLU + dropout.
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))
        
        # Spatial-channel attention: audio guides which video regions matter.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_rnn_input = visual_feature  # RNN input for the video stream
        
        # Initial temporal encoding via the bi-LSTMs.
        audio_rnn_output1, visual_rnn_output1 = self.audio_visual_rnn_layer(audio_rnn_input, visual_rnn_input)
        # Time-major layout for the Transformers:
        # [batch, seq_len, dim] -> [seq_len, batch, dim].
        audio_encoder_input1 = audio_rnn_output1.transpose(1, 0).contiguous()
        visual_encoder_input1 = visual_rnn_output1.transpose(1, 0).contiguous()
        
        # Audio query: decode audio against the encoded video (audio attends
        # to video).
        video_key_value_feature = self.video_encoder(visual_encoder_input1)
        audio_query_output = self.audio_decoder(audio_encoder_input1, video_key_value_feature)

        # Video query: decode video against the encoded audio (video attends
        # to audio).
        audio_key_value_feature = self.audio_encoder(audio_encoder_input1)
        video_query_output = self.video_decoder(visual_encoder_input1, audio_key_value_feature)

        # Gate weights.
        # NOTE(review): here each gate is driven by its OWN modality's
        # encoding; weak_main_model uses the opposite pairing — confirm
        # which is intended.
        audio_gate = self.audio_gated(audio_key_value_feature)
        video_gate = self.video_gated(video_key_value_feature)
        audio_visual_gate = audio_gate * video_gate     # joint audio-video gate

        # Gated residual boost, scaled by alpha.
        video_query_output = video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality class score projections.
        video_cas = self.video_cas(video_query_output)  # [seq_len, batch, 28]
        audio_cas = self.audio_cas(audio_query_output)  # [seq_len, batch, 28]
        # [seq_len, batch, 28] -> [batch, seq_len, 28]
        video_cas = video_cas.permute(1, 0, 2)
        audio_cas = audio_cas.permute(1, 0, 2)
        
        # Top-K (K=4) mean over time for robustness.
        sorted_scores_video, _ = video_cas.sort(descending=True, dim=1)
        topk_scores_video = sorted_scores_video[:, :4, :]   # 4 strongest responses per class
        score_video = torch.mean(topk_scores_video, dim=1)  # video class scores

        sorted_scores_audio, _ = audio_cas.sort(descending=True, dim=1)
        topk_scores_audio = sorted_scores_audio[:, :4, :]
        score_audio = torch.mean(topk_scores_audio, dim=1)  # audio class scores
        
        # Average the two modalities' class scores.
        av_score = (score_video + score_audio) / 2
        
        # Cross-modal interaction: deeper fusion of the two streams.
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)
        
        # Localise and classify from the averaged fused features.
        is_event_scores, event_scores = self.localize_module((video_query_output + audio_query_output)/2)
        # Boost class prediction with the fused modality scores.
        event_scores = event_scores + self.gamma*av_score

        return is_event_scores, event_scores, audio_visual_gate, av_score