import torch
import copy
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import Module
from torch.nn import MultiheadAttention
from torch.nn import ModuleList
from torch.nn.init import xavier_uniform_
from torch.nn import Dropout
from torch.nn import Linear
from torch.nn import LayerNorm
import math
from torch.autograd import Variable



class Dual_lstm_cell(nn.Module):
    """A single time-step LSTM cell that updates two modality streams (visual
    and audio) in parallel, each with its own gate parameters.

    NOTE(review): the cross-modal interaction terms weighted by ``alph`` are
    commented out in ``forward``, so ``alph`` and the layers ``as_linear``,
    ``ah_linear``, ``vs_linear2``, ``vh_linear2`` currently receive gradients
    of zero / are unused. They are kept to preserve the parameter layout
    (state_dict compatibility).
    """

    def __init__(self, visual_input_dim, audio_input_dim, hidden_dim, alph=0.5, bias=True):
        """
        Initialize the dual LSTM cell.

        Args:
            visual_input_dim: feature dimension of the visual input.
            audio_input_dim: feature dimension of the audio input.
            hidden_dim: hidden-state dimension (shared by both streams).
            alph: cross-modal interaction weight (unused in the current
                implementation — the interaction terms are commented out).
            bias: whether the gate linear layers use a bias term.
        """
        super(Dual_lstm_cell, self).__init__()
        self.visual_input_dim = visual_input_dim
        self.audio_input_dim = audio_input_dim
        self.hidden_dim = hidden_dim
        # Cross-modal interaction coefficient (currently unused).
        self.alph = alph
        # Visual gates: input-to-gates and hidden-to-gates transforms.
        # Each produces 4*hidden_dim features (input/forget/cell/output gates).
        self.vs_linear = nn.Linear(self.visual_input_dim, 4 * self.hidden_dim, bias=bias)
        self.vh_linear = nn.Linear(self.hidden_dim, 4 * self.hidden_dim, bias=bias)
        # Audio gates: input-to-gates and hidden-to-gates transforms.
        # NOTE: forward() uses as_linear2/ah_linear2 for the audio stream;
        # as_linear/ah_linear belong to the disabled cross-modal terms.
        self.as_linear = nn.Linear(self.audio_input_dim, 4 * self.hidden_dim, bias=bias)
        self.ah_linear = nn.Linear(self.hidden_dim, 4 * self.hidden_dim, bias=bias)

        # Second set of gate transforms (intended for cross-modal interaction;
        # only the audio pair is active in the current forward pass).
        self.as_linear2 = nn.Linear(self.audio_input_dim, 4 * self.hidden_dim, bias=bias)
        self.ah_linear2 = nn.Linear(self.hidden_dim, 4 * self.hidden_dim, bias=bias)
        self.vs_linear2 = nn.Linear(self.visual_input_dim, 4 * self.hidden_dim, bias=bias)
        self.vh_linear2 = nn.Linear(self.hidden_dim, 4 * self.hidden_dim, bias=bias)

        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all weights uniformly in [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)]."""
        std = 1.0 / math.sqrt(self.hidden_dim)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, visual_state, visual_hidden, visual_cell, audio_state, audio_hidden, audio_cell):
        """
        Run one LSTM step for both modalities.

        Args:
            visual_state: visual input at the current step, (batch, visual_input_dim).
            visual_hidden: previous visual hidden state, (batch, hidden_dim).
            visual_cell: previous visual cell state, (batch, hidden_dim).
            audio_state: audio input at the current step, (batch, audio_input_dim).
            audio_hidden: previous audio hidden state, (batch, hidden_dim).
            audio_cell: previous audio cell state, (batch, hidden_dim).

        Returns:
            (visual_output, visual_cell, audio_output, audio_cell): new hidden
            and cell states for each modality, each (batch, hidden_dim).
        """
        # Visual gate pre-activations (input + hidden contributions).
        # Disabled cross-modal term:
        #   + self.alph * self.as_linear(audio_state) + self.alph * self.ah_linear(audio_hidden)
        visual_gates = self.vs_linear(visual_state) + self.vh_linear(visual_hidden)

        # Audio gate pre-activations (input + hidden contributions).
        # Disabled cross-modal term:
        #   + self.alph * self.vs_linear2(visual_state) + self.alph * self.vh_linear2(visual_hidden)
        audio_gates = self.as_linear2(audio_state) + self.ah_linear2(audio_hidden)

        # Split the 4*hidden_dim pre-activations into the four gates.
        visual_i_gate, visual_f_gate, visual_c_gate, visual_o_gate = visual_gates.chunk(4, 1)
        audio_i_gate, audio_f_gate, audio_c_gate, audio_o_gate = audio_gates.chunk(4, 1)

        # Visual gate activations.
        # torch.sigmoid/torch.tanh replace deprecated F.sigmoid/F.tanh.
        visual_i_gate = torch.sigmoid(visual_i_gate)  # input gate: admit new info
        visual_f_gate = torch.sigmoid(visual_f_gate)  # forget gate: retain old info
        visual_c_gate = torch.tanh(visual_c_gate)     # candidate cell state
        visual_o_gate = torch.sigmoid(visual_o_gate)  # output gate: expose cell state

        # Visual state update (standard LSTM recurrence).
        visual_cell = visual_f_gate * visual_cell + visual_i_gate * visual_c_gate
        visual_output = visual_o_gate * torch.tanh(visual_cell)

        # Audio gate activations.
        audio_i_gate = torch.sigmoid(audio_i_gate)  # input gate
        audio_f_gate = torch.sigmoid(audio_f_gate)  # forget gate
        audio_c_gate = torch.tanh(audio_c_gate)     # candidate cell state
        audio_o_gate = torch.sigmoid(audio_o_gate)  # output gate

        # Audio state update.
        audio_cell = audio_f_gate * audio_cell + audio_i_gate * audio_c_gate
        audio_output = audio_o_gate * torch.tanh(audio_cell)

        return visual_output, visual_cell, audio_output, audio_cell

class Dual_lstm(nn.Module):
    """Unidirectional dual LSTM that processes visual and audio sequences in
    parallel by unrolling a shared ``Dual_lstm_cell`` over time.

    The backward (reverse-time) direction from an earlier bidirectional
    variant has been removed; only the forward pass is computed.

    NOTE(review): ``self.v_fc`` is constructed but never applied in
    ``forward``; it is kept so existing checkpoints still load.
    """

    def __init__(self):
        """Build the cell and auxiliary layers with fixed dimensions."""
        super(Dual_lstm, self).__init__()

        # Video input feature dimension.
        self.video_input_dim = 512
        # Video fully-connected output dimension (layer currently unused in forward).
        self.video_fc_dim = 512
        # Hidden-state dimension of both LSTM streams.
        self.d_model = 256
        self.v_fc = nn.Linear(self.video_input_dim, self.video_fc_dim)
        # Dual LSTM cell: visual input 512-d, audio input 128-d, hidden 256-d.
        self.LSTM_cell = Dual_lstm_cell(visual_input_dim=512, audio_input_dim=128, hidden_dim=256)

        self.relu = nn.ReLU()           # ReLU activation (unused in forward)
        self.dropout = nn.Dropout(0.2)  # dropout for regularization (unused in forward)

    def forward(self, audio_feature, visual_feature):
        """
        Unroll the dual LSTM cell over the time dimension.

        Args:
            audio_feature: audio features, (batch, seq_len, 128), expected float64
                (states are created as float64; the cell must be .double()'d to match).
            visual_feature: visual features, (batch, seq_len, 512), expected float64.

        Returns:
            audio_output: audio hidden states, (batch, seq_len, d_model).
            visual_output: visual hidden states, (batch, seq_len, d_model).
        """
        audio_rnn_input = audio_feature
        visual_rnn_input = visual_feature

        # Pick GPU when available, matching the original device-selection logic.
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

        def _init_state(batch_size):
            # Zero state, created directly as float64 (the original allocated
            # float32 zeros and immediately cast with .double()).
            return torch.zeros(batch_size, self.d_model, dtype=torch.float64, device=device)

        # Initial hidden and cell states for both streams.
        visual_hidden = _init_state(visual_rnn_input.size(0))
        visual_cell = _init_state(visual_rnn_input.size(0))
        audio_hidden = _init_state(audio_rnn_input.size(0))
        audio_cell = _init_state(audio_rnn_input.size(0))

        visual_output = []
        audio_output = []
        length = visual_rnn_input.size(1)  # number of time steps

        # Forward unroll over time.
        for i in range(length):
            visual_hidden, visual_cell, audio_hidden, audio_cell = self.LSTM_cell(
                visual_rnn_input[:, i, :], visual_hidden, visual_cell,
                audio_rnn_input[:, i, :], audio_hidden, audio_cell)
            visual_output.append(visual_hidden)
            audio_output.append(audio_hidden)

        # Stack per-step hidden states: (batch, seq_len, d_model).
        visual_output = torch.stack(visual_output, dim=1)
        audio_output = torch.stack(audio_output, dim=1)

        return audio_output, visual_output


# model = Dual_lstm()
# visual_feature = torch.randn(32, 10,512)
# audio_feature = torch.randn(32, 10, 128)
# model(audio_feature, visual_feature)
#
