import torch
import torch.nn as nn
import torch.nn.functional as F


# Multimodal gated-fusion module
class Multimodal_GatedFusion(nn.Module):
    """Fuse three modality representations via softmax-gated summation.

    Each input is projected by a shared bias-free linear layer; the
    projections are normalized with a softmax across the modality axis
    (dim=-2) and used as element-wise gates on the raw inputs, which are
    then summed into a single fused representation of the same shape as
    each input.
    """

    def __init__(self, hidden_size):
        super(Multimodal_GatedFusion, self).__init__()
        # Shared projection used to score every modality.
        self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
        # Normalizes gate scores across the stacked-modality axis.
        self.softmax = nn.Softmax(dim=-2)

    def forward(self, a, b, c):
        # Stack raw inputs along a new modality axis: (..., 3, hidden).
        stacked = torch.stack([a, b, c], dim=-2)
        # Project each modality with the shared layer, stacked the same way.
        scores = torch.stack([self.fc(a), self.fc(b), self.fc(c)], dim=-2)
        # Per-feature gates that sum to 1 across the three modalities.
        gates = self.softmax(scores)
        # Gated sum over the modality axis collapses back to (..., hidden).
        return (gates * stacked).sum(dim=-2)




# Multimodal attention-fusion module
class Multimodal_AttentionFusion(nn.Module):
    """Fuse three modality representations with softmax-scaled weighting.

    Each input is projected by a shared bias-free linear layer; the
    projections are passed through a softmax over the last (hidden)
    dimension and used as element-wise weights on the raw inputs, which
    are then summed over the modality axis.

    NOTE(review): unlike ``Multimodal_GatedFusion`` this softmax runs over
    dim=-1 (the feature dim), so the weights of the three modalities do
    NOT sum to 1 per feature — confirm this asymmetry is intentional.
    """

    def __init__(self, hidden_size):
        super(Multimodal_AttentionFusion, self).__init__()
        # Shared projection used to score every modality.
        self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
        # Normalizes scores along the hidden (feature) dimension.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, a, b, c):
        # Stack raw inputs along a new modality axis: (..., 3, hidden).
        stacked = torch.stack([a, b, c], dim=-2)
        # Project each modality with the shared layer, stacked the same way.
        scores = torch.stack([self.fc(a), self.fc(b), self.fc(c)], dim=-2)
        # Softmax over the feature dimension of every modality row.
        weights = self.softmax(scores)
        # Weighted sum over the modality axis collapses back to (..., hidden).
        return (weights * stacked).sum(dim=-2)




# Multimodal attention-gated fusion module
class Multimodal_Att_GatedFusion(nn.Module):
    """Fuse three modality representations with scalar attention weights.

    Each input is projected by a shared bias-free linear layer, then
    scored down to a single scalar per modality by an attention layer.
    The scalars are softmax-normalized across the modality axis and used
    to form a convex combination of the raw inputs.
    """

    def __init__(self, hidden_size):
        super(Multimodal_Att_GatedFusion, self).__init__()
        # Shared projection applied to every modality before scoring.
        self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
        # Maps each projected modality to one scalar attention score.
        self.attention = nn.Linear(hidden_size, 1, bias=False)
        # Normalizes scores across the stacked-modality axis.
        self.softmax = nn.Softmax(dim=-2)

    def forward(self, a, b, c):
        # Stack raw inputs along a new modality axis: (..., 3, hidden).
        stacked = torch.stack([a, b, c], dim=-2)
        # Project each modality with the shared layer, stacked the same way.
        projected = torch.stack([self.fc(a), self.fc(b), self.fc(c)], dim=-2)
        # One scalar score per modality: (..., 3, 1).
        raw_scores = self.attention(projected)
        # Softmax over modalities yields weights summing to 1.
        weights = self.softmax(raw_scores)
        # Broadcast-weighted sum over the modality axis: (..., hidden).
        return (weights * stacked).sum(dim=-2)

