import torch
from torch import nn
import torch.nn.functional as F
from .models import New_Audio_Guided_Attention
from .models import EncoderLayer, Encoder, DecoderLayer, Decoder
from torch.nn import MultiheadAttention
from .Dual_lstm import Dual_lstm


class RNNEncoder(nn.Module):
    """Temporal pre-encoder: runs audio and video streams through separate
    bidirectional LSTMs.

    Output widths (batch-first):
      - audio:  [batch, seq, d_model]      (hidden d_model/2, fwd+bwd concat)
      - video:  [batch, seq, 2 * d_model]  (hidden d_model, fwd+bwd concat)
    """

    def __init__(self, audio_dim, video_dim, d_model, num_layers):
        super(RNNEncoder, self).__init__()
        self.d_model = d_model
        # Audio branch: hidden size d_model/2 so the bidirectional concat
        # lands exactly on d_model channels.
        self.audio_rnn = nn.LSTM(audio_dim, int(d_model / 2), num_layers=num_layers,
                                 batch_first=True, bidirectional=True, dropout=0.2)
        # Video branch: hidden size d_model, so the bidirectional concat
        # produces 2 * d_model channels.
        self.visual_rnn = nn.LSTM(video_dim, d_model, num_layers=num_layers,
                                  batch_first=True, bidirectional=True, dropout=0.2)

    def forward(self, audio_feature, visual_feature):
        """Encode both modalities; hidden/cell states are discarded."""
        encoded_audio, _ = self.audio_rnn(audio_feature)
        encoded_video, _ = self.visual_rnn(visual_feature)
        return encoded_audio, encoded_video


class InternalTemporalRelationModule(nn.Module):
    """Intra-modal temporal modeling: project the input to d_model, then run a
    stacked Transformer encoder over the sequence."""

    def __init__(self, input_dim, d_model, feedforward_dim):
        super(InternalTemporalRelationModule, self).__init__()
        # Single encoder layer: 4 attention heads, FFN width feedforward_dim.
        self.encoder_layer = EncoderLayer(d_model=d_model, nhead=4,
                                          dim_feedforward=feedforward_dim)
        # Two stacked copies of the layer above.
        self.encoder = Encoder(self.encoder_layer, num_layers=2)
        # Maps raw features into the d_model embedding space.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        # NOTE(review): registered but never applied in forward.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, feature):
        """Project, then model temporal relations; returns d_model features."""
        projected = self.affine_matrix(feature)
        return self.encoder(projected)


class CrossModalRelationAttModule(nn.Module):
    """Cross-modal attention: a Transformer decoder in which one modality
    (the query) attends over the other (the memory)."""

    def __init__(self, input_dim, d_model, feedforward_dim):
        super(CrossModalRelationAttModule, self).__init__()
        # Single decoder layer with 4 attention heads.
        self.decoder_layer = DecoderLayer(d_model=d_model, nhead=4,
                                          dim_feedforward=feedforward_dim)
        self.decoder = Decoder(self.decoder_layer, num_layers=1)
        # Maps the query stream into the decoder's d_model space.
        self.affine_matrix = nn.Linear(input_dim, d_model)
        # NOTE(review): registered but never applied in forward.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, query_feature, memory_feature):
        """Project the query, then attend over memory_feature (keys/values)."""
        projected_query = self.affine_matrix(query_feature)
        return self.decoder(projected_query, memory_feature)


class CAS_Module(nn.Module):
    """Class Activation Sequence head: per-timestep class scores produced by a
    1x1 Conv1d over the feature channels (num_class + 1 outputs, the extra
    slot presumably for background — confirm against the label mapping)."""

    def __init__(self, d_model, num_class=28):
        super(CAS_Module, self).__init__()
        self.d_model = d_model
        self.num_class = num_class
        # NOTE(review): registered but never applied in forward.
        self.dropout = nn.Dropout(0.2)
        # Pointwise (kernel_size=1) convolution acting as a per-step classifier.
        self.classifier = nn.Sequential(
            nn.Conv1d(in_channels=d_model, out_channels=self.num_class+1,
                      kernel_size=1, stride=1, padding=0, bias=False)
        )

    def forward(self, content):
        """[batch, seq, d_model] -> [batch, seq, num_class + 1]."""
        # Conv1d wants channels-first, so swap to [batch, d_model, seq].
        channels_first = content.permute(0, 2, 1)
        scores = self.classifier(channels_first)
        # Restore [batch, seq, num_class + 1].
        return scores.permute(0, 2, 1)


class SupvLocalizeModule(nn.Module):
    """Supervised localization head: per-timestep event-presence logits plus a
    clip-level 28-way class prediction from max-pooled features."""

    def __init__(self, d_model):
        super(SupvLocalizeModule, self).__init__()
        # NOTE(review): registered but never applied in forward.
        self.relu = nn.ReLU(inplace=True)
        # Scores each timestep for event presence.
        self.classifier = nn.Linear(d_model, 1)
        # Predicts one of 28 event categories for the whole clip.
        self.event_classifier = nn.Linear(d_model, 28)

    def forward(self, fused_content):
        """fused_content: [seq, batch, d_model] ->
        (per-step logits [seq, batch, 1], class logits [batch, 28])."""
        # Move batch first, then max-pool across the temporal axis.
        pooled, _ = fused_content.transpose(1, 0).max(1)
        # Per-timestep presence logits.
        logits = self.classifier(fused_content)
        # Clip-level class logits from the pooled representation.
        class_scores = self.event_classifier(pooled)
        return logits, class_scores


class WeaklyLocalizationModule(nn.Module):
    """Weakly-supervised localization head: trained only with video-level
    labels, so per-step scores are fused with clip-level logits and then
    max-pooled back to the clip level."""

    def __init__(self, input_dim):
        super(WeaklyLocalizationModule, self).__init__()
        self.hidden_dim = input_dim
        # Per-timestep event-presence score.
        self.classifier = nn.Linear(self.hidden_dim, 1)
        # Clip-level category logits (29 outputs; 29th presumably background —
        # confirm against the label mapping).
        self.event_classifier = nn.Linear(self.hidden_dim, 29)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, fused_content):
        """fused_content: [seq, batch, dim] ->
        (presence scores, raw clip logits, softmaxed event scores)."""
        # Batch-first layout for pooling and per-step scoring.
        fused_content = fused_content.transpose(0, 1)
        # Global max-pool over the temporal axis.
        pooled, _ = fused_content.max(1)

        # Presence score per timestep: [batch, seq, 1].
        is_event_scores = self.classifier(fused_content)
        # Clip-level logits with a singleton time axis for broadcasting.
        raw_logits = self.event_classifier(pooled)[:, None, :]
        # Weight category logits by per-step presence probability.
        fused_logits = is_event_scores.sigmoid() * raw_logits
        # Collapse time with max to match the video-level supervision.
        logits, _ = torch.max(fused_logits, dim=1)
        event_scores = self.softmax(logits)

        return is_event_scores.squeeze(), raw_logits.squeeze(), event_scores


class AudioVideoInter(nn.Module):
    """Audio-video fusion block: the elementwise product of the two streams
    queries multi-head attention over both, with a residual + LayerNorm."""

    def __init__(self, d_model, n_head, head_dropout=0.1):
        super(AudioVideoInter, self).__init__()
        self.dropout = nn.Dropout(0.1)
        # n_head-way attention at d_model width (expects time-first inputs).
        self.video_multihead = MultiheadAttention(d_model, num_heads=n_head,
                                                  dropout=head_dropout)
        self.norm1 = nn.LayerNorm(d_model)

    def forward(self, video_feat, audio_feat):
        """Both inputs: [seq, batch, d_model]; returns the same shape."""
        # Elementwise product serves as the fused attention query.
        query = video_feat * audio_feat
        # Keys/values: audio and video stacked along the time axis.
        memory = torch.cat([audio_feat, video_feat], dim=0)
        attended, _ = self.video_multihead(query, memory, memory)
        # Residual connection followed by layer normalization.
        return self.norm1(query + self.dropout(attended))


class weak_main_model(nn.Module):
    """Weakly-supervised main model: multimodal event localization built on
    audio-guided attention plus cross-modal Transformers, trained with
    video-level labels only.

    forward(visual_feature, audio_feature) returns
    (av_gate.squeeze(), raw_logits.squeeze(), event_scores).
    """

    def __init__(self, config):
        super(weak_main_model, self).__init__()
        self.config = config
        # Scalar hyper-parameters read from the config dict.
        self.beta = self.config["beta"]    # forwarded to New_Audio_Guided_Attention
        self.alpha = self.config["alpha"]  # gating blend weight (used in forward)
        self.gamma = self.config["gamma"]  # CAS score scale (used in forward)

        # Audio-guided spatial/channel attention over the video features.
        # NOTE(review): hard-coded .cuda() makes the module GPU-only — confirm intended.
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()

        # Feature dimensionalities.
        self.video_input_dim = self.config["video_inputdim"]
        self.video_fc_dim = self.config["video_inputdim"]
        self.d_model = self.config["d_model"]
        self.audio_input_dim = self.config["audio_inputdim"]

        # Video feature projection layer.
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)

        # Intra-modal temporal encoders and cross-modal decoders.
        self.video_encoder = InternalTemporalRelationModule(input_dim=self.video_fc_dim, 
                                                           d_model=self.d_model, feedforward_dim=2048)
        self.video_decoder = CrossModalRelationAttModule(input_dim=self.video_fc_dim,
                                                        d_model=self.d_model, feedforward_dim=1024)
        self.audio_encoder = InternalTemporalRelationModule(input_dim=self.audio_input_dim,
                                                           d_model=self.d_model, feedforward_dim=2048)
        self.audio_decoder = CrossModalRelationAttModule(input_dim=self.audio_input_dim,
                                                        d_model=self.d_model, feedforward_dim=1024)
        
        # Audio->video and video->audio interaction blocks.
        self.AVInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)
        self.VAInter = AudioVideoInter(self.d_model, n_head=2, head_dropout=0.2)
        
        # Per-timestep scalar gates in [0, 1].
        self.audio_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        self.video_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        
        # Classification / class-activation-sequence heads (28 classes + 1).
        self.CAS_model = CAS_Module(d_model=self.d_model, num_class=28)
        self.classifier = nn.Linear(self.d_model, 1)
        self.softmax = nn.Softmax(dim=-1)
        self.audio_cas = nn.Linear(self.d_model, 29)
        self.video_cas = nn.Linear(self.d_model, 29)

    def forward(self, visual_feature, audio_feature):
        # Put audio time-first for the Transformer modules; project video.
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))

        # Audio-guided spatial/channel attention, then time-first layout.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_feature = visual_feature.transpose(1, 0).contiguous()

        # Audio-query path: video encodes the keys/values, audio queries them.
        video_key_value_feature = self.video_encoder(visual_feature)
        audio_query_output = self.audio_decoder(audio_feature, video_key_value_feature)

        # Video-query path: audio encodes the keys/values, video queries them.
        audio_key_value_feature = self.audio_encoder(audio_feature)
        video_query_output = self.video_decoder(visual_feature, audio_key_value_feature)

        # Gate weights — note each gate is computed from the OTHER modality's
        # encoded features (supv_main_model uses the same-modality features;
        # confirm the asymmetry is intentional).
        audio_gate = self.audio_gated(video_key_value_feature)
        video_gate = self.video_gated(audio_key_value_feature)
        av_gate = (audio_gate + video_gate) / 2
        av_gate = av_gate.permute(1, 0, 2)  # -> [batch, seq, 1]

        # Blend each query stream with its cross-modal gated copy (weight alpha).
        video_query_output = (1 - self.alpha)*video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = (1 - self.alpha)*audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality CAS logits, moved to batch-first layout.
        video_cas = self.video_cas(video_query_output)
        audio_cas = self.audio_cas(audio_query_output)
        video_cas = video_cas.permute(1, 0, 2)
        audio_cas = audio_cas.permute(1, 0, 2)

        # Sigmoid gates derived from the CAS logits.
        video_cas_gate = video_cas.sigmoid()
        audio_cas_gate = audio_cas.sigmoid()

        # Cross-modal attention enhancement of each stream.
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)

        # Average-fuse the two streams; batch-first for the CAS head.
        fused_content = (video_query_output + audio_query_output) / 2
        fused_content = fused_content.transpose(0, 1)

        # CAS scores modulated by both modality gates, each scaled by gamma.
        cas_score = self.CAS_model(fused_content)
        cas_score = self.gamma*video_cas_gate*cas_score + self.gamma*audio_cas_gate*cas_score

        # Top-4 temporal pooling of the CAS scores per class.
        sorted_scores, _ = cas_score.sort(descending=True, dim=1)
        topk_scores = sorted_scores[:, :4, :]
        raw_logits = torch.mean(topk_scores, dim=1)[:, None, :]

        # Video-level logits: gate-weighted, max-pooled over time, softmaxed.
        fused_logits = av_gate * raw_logits
        logits, _ = torch.max(fused_logits, dim=1)
        event_scores = self.softmax(logits)

        return av_gate.squeeze(), raw_logits.squeeze(), event_scores


class supv_main_model(nn.Module):
    """Fully-supervised main model: bi-LSTM pre-encoding plus cross-modal
    Transformers for multimodal event detection.

    forward(visual_feature, audio_feature) returns
    (is_event_scores, event_scores, audio_visual_gate, av_score).
    """

    def __init__(self, config):
        super(supv_main_model, self).__init__()
        self.config = config
        self.beta = self.config["beta"]  # forwarded to New_Audio_Guided_Attention

        # Audio-guided spatial/channel attention over the video features.
        # NOTE(review): hard-coded .cuda() makes the module GPU-only — confirm intended.
        self.spatial_channel_att = New_Audio_Guided_Attention(self.beta).cuda()

        # Feature dimensionalities.
        self.video_input_dim = self.config['video_inputdim']
        self.audio_input_dim = self.config['audio_inputdim']
        self.video_fc_dim = 512
        self.d_model = self.config['d_model']

        # Video feature projection.
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.2)

        # Intra-modal encoders / cross-modal decoders.
        # NOTE(review): the video branch declares input_dim=video_input_dim, but
        # forward feeds it the bidirectional visual RNN output (2 * d_model
        # channels) — this only works when video_inputdim == 2 * d_model in the
        # config; confirm.
        self.video_encoder = InternalTemporalRelationModule(input_dim=self.video_input_dim, 
                                                           d_model=self.d_model, feedforward_dim=1024)
        self.video_decoder = CrossModalRelationAttModule(input_dim=self.video_input_dim,
                                                        d_model=self.d_model, feedforward_dim=1024)
        self.audio_encoder = InternalTemporalRelationModule(input_dim=self.d_model,
                                                           d_model=self.d_model, feedforward_dim=1024)
        self.audio_decoder = CrossModalRelationAttModule(input_dim=self.d_model,
                                                        d_model=self.d_model, feedforward_dim=1024)
        
        # Bi-LSTM temporal pre-encoders for both modalities.
        self.audio_visual_rnn_layer = RNNEncoder(audio_dim=self.audio_input_dim, 
                                                video_dim=self.video_input_dim, 
                                                d_model=self.d_model, num_layers=1)
        
        # Per-timestep scalar gates in [0, 1].
        self.audio_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )
        self.video_gated = nn.Sequential(
            nn.Linear(self.d_model, 1),
            nn.Sigmoid()
        )

        # Cross-modal interaction and localization heads.
        self.AVInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)
        self.VAInter = AudioVideoInter(self.d_model, n_head=4, head_dropout=0.2)
        self.localize_module = SupvLocalizeModule(self.d_model)
        
        # Normalization and per-modality CAS heads (28 classes, no background).
        # NOTE(review): video_norm/audio_norm are registered but not used in forward.
        self.video_norm = nn.LayerNorm(self.d_model)
        self.audio_norm = nn.LayerNorm(self.d_model)
        self.audio_cas = nn.Linear(self.d_model, 28)
        self.video_cas = nn.Linear(self.d_model, 28)
        
        self.alpha = self.config['alpha']  # gating mix-in weight (used in forward)
        self.gamma = self.config['gamma']  # av_score boost weight (used in forward)

    def forward(self, visual_feature, audio_feature):
        # Keep a batch-first copy of audio for the RNN; Transformers use time-first.
        audio_rnn_input = audio_feature
        audio_feature = audio_feature.transpose(1, 0).contiguous()
        visual_feature = self.v_fc(visual_feature)
        visual_feature = self.dropout(self.relu(visual_feature))

        # Audio-guided spatial/channel attention over the video stream.
        visual_feature = self.spatial_channel_att(visual_feature, audio_feature)
        visual_rnn_input = visual_feature

        # Bi-LSTM temporal encoding, then switch to time-first layout.
        audio_rnn_output1, visual_rnn_output1 = self.audio_visual_rnn_layer(audio_rnn_input, visual_rnn_input)
        audio_encoder_input1 = audio_rnn_output1.transpose(1, 0).contiguous()
        visual_encoder_input1 = visual_rnn_output1.transpose(1, 0).contiguous()

        # Audio-query path: video encodes the keys/values, audio queries them.
        video_key_value_feature = self.video_encoder(visual_encoder_input1)
        audio_query_output = self.audio_decoder(audio_encoder_input1, video_key_value_feature)

        # Video-query path: audio encodes the keys/values, video queries them.
        audio_key_value_feature = self.audio_encoder(audio_encoder_input1)
        video_query_output = self.video_decoder(visual_encoder_input1, audio_key_value_feature)

        # Gates computed from each modality's own encoded features.
        # NOTE(review): weak_main_model computes these gates from the opposite
        # modality's features — confirm the asymmetry is intentional.
        audio_gate = self.audio_gated(audio_key_value_feature)
        video_gate = self.video_gated(video_key_value_feature)
        audio_visual_gate = audio_gate * video_gate

        # Residual gated enhancement of each stream, scaled by alpha.
        video_query_output = video_query_output + audio_gate * video_query_output * self.alpha
        audio_query_output = audio_query_output + video_gate * audio_query_output * self.alpha

        # Per-modality CAS logits, moved to batch-first layout.
        video_cas = self.video_cas(video_query_output)
        audio_cas = self.audio_cas(audio_query_output)
        video_cas = video_cas.permute(1, 0, 2)
        audio_cas = audio_cas.permute(1, 0, 2)
        
        # Top-4 temporal pooling per class: video stream.
        sorted_scores_video, _ = video_cas.sort(descending=True, dim=1)
        topk_scores_video = sorted_scores_video[:, :4, :]
        score_video = torch.mean(topk_scores_video, dim=1)
        
        # Top-4 temporal pooling per class: audio stream.
        sorted_scores_audio, _ = audio_cas.sort(descending=True, dim=1)
        topk_scores_audio = sorted_scores_audio[:, :4, :]
        score_audio = torch.mean(topk_scores_audio, dim=1)

        # Average the two modality scores.
        av_score = (score_video + score_audio) / 2

        # Cross-modal attention enhancement of each stream.
        video_query_output = self.AVInter(video_query_output, audio_query_output)
        audio_query_output = self.VAInter(audio_query_output, video_query_output)

        # Localization head on the averaged streams.
        is_event_scores, event_scores = self.localize_module((video_query_output + audio_query_output) / 2)
        # Boost class scores with the pooled CAS evidence (weight gamma).
        event_scores = event_scores + self.gamma * av_score

        return is_event_scores, event_scores, audio_visual_gate, av_score