
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import torch.nn.init as init
from session_encoder import TripleTransformer, TransformerLayerAbs
from transformer_encoder import TransformerEncoder, TransformerEncoderLayer
from sentence_encoder import Unimodal_GatedFusion
from crossmodal_fusion import Multimodal_GatedFusion
from sa_bigru import BiSA_GRU


class Hierarchical_Transformer_Model(nn.Module):
    """Hierarchical multimodal transformer for utterance-level emotion classification.

    Pipeline per forward pass:
      1. BiSA-GRU encoders project text / audio / visual features to ``hidden_dim``.
      2. Nine intra-/inter-modal ``TransformerEncoder``s cross-attend every
         (query modality, key/value modality) pair, conditioned on speaker embeddings.
      3. Unimodal gated fusion re-weights each transformer output; the three
         streams per target modality are concatenated and linearly reduced.
      4. Multimodal gated fusion merges the three reduced streams.
      5. Per-modality heads and a fused head emit log-probabilities, plus
         temperature-scaled variants for KL-divergence (distillation) losses.

    Args:
        dataset: dataset identifier, forwarded to the gated-fusion modules.
        temp: softmax temperature used for the KL-scaled outputs.
        D_text, D_visual, D_audio: raw per-modality feature dimensions.
        n_head: number of attention heads.
        n_classes: number of emotion classes.
        hidden_dim: shared hidden size of all encoders and fusion layers.
        n_speakers: number of distinct speakers in the dataset.
        num_block: kept for interface compatibility (currently unused).
        dropout: dropout probability for transformer and classifier layers.
    """

    def __init__(self, dataset, temp, D_text, D_visual, D_audio, n_head,
                 n_classes, hidden_dim, n_speakers, num_block, dropout):
        super(Hierarchical_Transformer_Model, self).__init__()
        self.temp = temp
        self.n_classes = n_classes
        self.n_speakers = n_speakers
        # Index `n_speakers` is reserved as the padding speaker (used for
        # positions past each dialogue's true length). The original code only
        # handled n_speakers in {2, 9} and left `padding_idx` unbound for any
        # other value; assigning it unconditionally is behavior-identical for
        # those cases and correct for all others.
        padding_idx = n_speakers
        self.speaker_embeddings = nn.Embedding(n_speakers + 1, hidden_dim, padding_idx)

        # Unimodal sequence encoders (self-attentive BiGRU) mapping raw
        # per-modality feature dims to the shared hidden size.
        self.text_input = BiSA_GRU(D_text, hidden_dim)
        self.acou_input = BiSA_GRU(D_audio, hidden_dim)
        self.visu_input = BiSA_GRU(D_visual, hidden_dim)

        # Intra- and inter-modal transformers: `x_y` attends modality x
        # (query) over modality y (key/value).
        self.t_t = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.a_t = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.v_t = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)

        self.a_a = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.t_a = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.v_a = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)

        self.v_v = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.t_v = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)
        self.a_v = TransformerEncoder(d_model=hidden_dim, d_ff=hidden_dim, heads=n_head, layers=1, dropout=dropout)

        # Unimodal-level gated fusion: one gate per transformer stream.
        self.t_t_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.a_t_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.v_t_gate = Unimodal_GatedFusion(hidden_dim, dataset)

        self.a_a_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.t_a_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.v_a_gate = Unimodal_GatedFusion(hidden_dim, dataset)

        self.v_v_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.t_v_gate = Unimodal_GatedFusion(hidden_dim, dataset)
        self.a_v_gate = Unimodal_GatedFusion(hidden_dim, dataset)

        # Reduce the three gated streams of each target modality back to hidden_dim.
        self.features_reduce_t = nn.Linear(3 * hidden_dim, hidden_dim)
        self.features_reduce_a = nn.Linear(3 * hidden_dim, hidden_dim)
        self.features_reduce_v = nn.Linear(3 * hidden_dim, hidden_dim)

        # Pairwise 4x reductions — constructed but not used by the current
        # forward pass; kept so checkpoints/state_dict layout stay compatible.
        self.features_reduce_t_a = nn.Linear(4 * hidden_dim, hidden_dim)
        self.features_reduce_t_v = nn.Linear(4 * hidden_dim, hidden_dim)
        self.features_reduce_v_a = nn.Linear(4 * hidden_dim, hidden_dim)

        # Multimodal-level gated fusion across the three reduced streams.
        self.last_gate = Multimodal_GatedFusion(hidden_dim)

        # Conversation-level modeling module (its invocation in forward() is
        # currently disabled; see the commented call there).
        self.hier_trans = TripleTransformer(
            layer=TransformerLayerAbs(hidden_dim, n_head, dim_feedforward=1024, dropout=0.5, activation='relu'),
            nhead=n_head,
            num_layer=1,
            emb_dim=hidden_dim,
            max_len=2000,
            bidirectional=True,
            num_block=3
        )

        # Per-modality emotion classifier heads.
        self.t_output_layer = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
            )
        self.a_output_layer = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
            )
        self.v_output_layer = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
            )
        # Alternate classifier heads — constructed but unused by forward();
        # kept for checkpoint compatibility.
        self.t_output_layer_1 = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
        )
        self.a_output_layer_1 = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
        )
        self.v_output_layer_1 = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, n_classes)
        )

        # Fused (tri-modal) classifier head.
        self.all_output_layer = nn.Linear(hidden_dim, n_classes)

    def forward(self, textf, visuf, acouf, u_mask, qmask, dia_len, mode):
        """Run the full multimodal pipeline.

        Args:
            textf, visuf, acouf: per-modality utterance features; assumed
                (seq_len, batch, feat_dim) — transposed to batch-first before
                the BiSA-GRU encoders. TODO(review): confirm against caller.
            u_mask: utterance mask passed to the transformer encoders.
            qmask: one-hot speaker mask; argmax over the last dim gives the
                speaker index per utterance.
            dia_len: true length of each dialogue in the batch.
            mode: kept for interface compatibility (currently unused).

        Returns:
            Tuple of nine tensors:
            (t_log_prob, a_log_prob, v_log_prob, all_log_prob, all_prob,
             kl_t_log_prob, kl_a_log_prob, kl_v_log_prob, kl_all_prob).
        """
        # Speaker index per utterance; positions beyond each dialogue's real
        # length get the padding speaker id so they map to the padding
        # embedding. A scalar in-place assignment is device-agnostic — the
        # original built a CUDA tensor here, which broke CPU-only execution.
        spk_idx = torch.argmax(qmask, -1)
        for i, length in enumerate(dia_len):
            spk_idx[i, length:] = self.n_speakers
        spk_embeddings = self.speaker_embeddings(spk_idx)

        # Unimodal BiSA-GRU encoding (inputs transposed to batch-first).
        textf = self.text_input(textf.transpose(0, 1))
        acouf = self.acou_input(acouf.transpose(0, 1))
        visuf = self.visu_input(visuf.transpose(0, 1))

        # Intra- and inter-modal transformers: query modality attends over
        # the key/value modality, conditioned on speaker embeddings.
        t_t_transformer_out = self.t_t(textf, textf, u_mask, spk_embeddings)
        a_t_transformer_out = self.a_t(acouf, textf, u_mask, spk_embeddings)
        v_t_transformer_out = self.v_t(visuf, textf, u_mask, spk_embeddings)

        a_a_transformer_out = self.a_a(acouf, acouf, u_mask, spk_embeddings)
        t_a_transformer_out = self.t_a(textf, acouf, u_mask, spk_embeddings)
        v_a_transformer_out = self.v_a(visuf, acouf, u_mask, spk_embeddings)

        v_v_transformer_out = self.v_v(visuf, visuf, u_mask, spk_embeddings)
        t_v_transformer_out = self.t_v(textf, visuf, u_mask, spk_embeddings)
        a_v_transformer_out = self.a_v(acouf, visuf, u_mask, spk_embeddings)

        # Unimodal-level gated fusion.
        t_t_transformer_out = self.t_t_gate(t_t_transformer_out)
        a_t_transformer_out = self.a_t_gate(a_t_transformer_out)
        v_t_transformer_out = self.v_t_gate(v_t_transformer_out)

        a_a_transformer_out = self.a_a_gate(a_a_transformer_out)
        t_a_transformer_out = self.t_a_gate(t_a_transformer_out)
        v_a_transformer_out = self.v_a_gate(v_a_transformer_out)

        v_v_transformer_out = self.v_v_gate(v_v_transformer_out)
        t_v_transformer_out = self.t_v_gate(t_v_transformer_out)
        a_v_transformer_out = self.a_v_gate(a_v_transformer_out)

        # Per-modality fusion: each target modality aggregates its own
        # intra-modal stream plus the two streams where it is the query.
        t_transformer_out = self.features_reduce_t(torch.cat([t_t_transformer_out, t_a_transformer_out, t_v_transformer_out], dim=-1))
        a_transformer_out = self.features_reduce_a(torch.cat([a_a_transformer_out, a_t_transformer_out, a_v_transformer_out], dim=-1))
        v_transformer_out = self.features_reduce_v(torch.cat([v_v_transformer_out, v_t_transformer_out, v_a_transformer_out], dim=-1))

        # Multimodal-level gated fusion across the three modality streams.
        all_transformer_out = self.last_gate(t_transformer_out, a_transformer_out, v_transformer_out)

        # Conversation-level modeling (disabled; modes: u/s/o, so/us/uo, uso):
        # all_transformer_out = self.hier_trans(all_transformer_out, u_mask, spk_idx, mode='uo')

        # Per-modality emotion classifiers.
        t_final_out = self.t_output_layer(t_transformer_out)
        a_final_out = self.a_output_layer(a_transformer_out)
        v_final_out = self.v_output_layer(v_transformer_out)

        # Fused tri-modal classifier.
        all_final_out = self.all_output_layer(all_transformer_out)

        # Per-class log-probabilities over the class dimension (dim 2).
        t_log_prob = F.log_softmax(t_final_out, 2)
        a_log_prob = F.log_softmax(a_final_out, 2)
        v_log_prob = F.log_softmax(v_final_out, 2)

        all_log_prob = F.log_softmax(all_final_out, 2)

        all_prob = F.softmax(all_final_out, 2)

        # Temperature-scaled distributions for the KL-divergence loss terms.
        kl_t_log_prob = F.log_softmax(t_final_out / self.temp, 2)
        kl_a_log_prob = F.log_softmax(a_final_out / self.temp, 2)
        kl_v_log_prob = F.log_softmax(v_final_out / self.temp, 2)

        kl_all_prob = F.softmax(all_final_out / self.temp, 2)

        return t_log_prob, a_log_prob, v_log_prob, all_log_prob, all_prob, \
               kl_t_log_prob, kl_a_log_prob, kl_v_log_prob, kl_all_prob
