import torch
import torch.nn as nn
import torch.nn.functional as F

# 定义一个Unimodal_GatedFusion类，继承自nn.Module
# class Unimodal_GatedFusion(nn.Module):
#     # 初始化函数，参数为隐藏层大小和数据集
#     def __init__(self, hidden_size, dataset):
#         super(Unimodal_GatedFusion, self).__init__()
#         # 定义一个线性层，输入为隐藏层大小，输出为隐藏层大小，不使用偏置
#         self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
#         # 如果数据集为MELD，则将线性层的权重设置为单位矩阵，并且不使用梯度
#         # init.kaiming_uniform_(self.fc.weight, nonlinearity='relu')
#
#         if dataset == 'MELD':
#             # self.fc.weight.data.copy_(torch.eye(hidden_size, hidden_size))
#             self.fc.weight.requires_grad = True
#
#     # 定义前向传播函数，参数为a
#     def forward(self, a):
#         # 使用sigmoid函数对线性层的输出进行激活
#         z = torch.sigmoid(self.fc(a))
#         # 计算最终 representation
#         final_rep = z * a
#         # 返回最终 representation
#         return final_rep


class SelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention.

    Projects the input into query/key/value spaces with three separate
    linear layers, then returns the attention-weighted values.

    Args:
        hidden_size: Dimensionality of the input features; also used for
            the query/key/value projection outputs.
    """

    def __init__(self, hidden_size):
        super(SelfAttention, self).__init__()
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        # Project the input into the three attention spaces.
        q = self.query(x)
        k = self.key(x)
        v = self.value(x)

        # Scale dot products by sqrt(d_k) so softmax stays in a range
        # with useful gradients.
        scale = torch.sqrt(torch.tensor(q.size(-1), dtype=torch.float32))
        weights = F.softmax(torch.matmul(q, k.transpose(-2, -1)) / scale, dim=-1)

        # Attention-weighted combination of the values.
        return torch.matmul(weights, v)

# Variant with regularization (dropout + L2 penalty), an attention submodule,
# and a non-linear activation added:
class Unimodal_GatedFusion(nn.Module):
    """Gated fusion for a single modality.

    Computes a sigmoid gate from a bias-free linear projection of the input
    and multiplies the (dropout-regularized) gate element-wise with the
    input, followed by a ReLU.

    Args:
        hidden_size: Feature dimensionality of the input and output.
        dataset: Dataset name; for 'MELD' the gate weights are re-initialized
            with Xavier-uniform.
        dropout: Dropout probability applied to the gate values.
        l2_regularization: Coefficient for the L2 penalty on the gate weight
            matrix. The penalty is exposed as ``self.l2_loss`` after each
            forward pass; add it to the training objective.

    Attributes:
        l2_loss: Scalar tensor with the current L2 penalty
            (``l2_regularization * ||W||``), refreshed on every forward call.
    """

    def __init__(self, hidden_size, dataset, dropout=0.1, l2_regularization=0.001):
        super(Unimodal_GatedFusion, self).__init__()
        self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.l2_regularization = l2_regularization
        # NOTE(review): self.attention is constructed but currently unused in
        # forward (the attended path is disabled); kept for checkpoint
        # compatibility.
        self.attention = SelfAttention(hidden_size)
        self.activation = nn.ReLU()
        # Exposed L2 penalty for the training loop; starts at zero.
        self.l2_loss = torch.zeros(())

        if dataset == 'MELD':
            nn.init.xavier_uniform_(self.fc.weight)

    def forward(self, a):
        # Sigmoid gate in (0, 1), regularized with dropout during training.
        z = torch.sigmoid(self.fc(a))
        z = self.dropout(z)

        # BUG FIX: the original added the L2 penalty scalar directly into the
        # gate activations (z += l2_loss), which uniformly shifts every gate
        # value at inference time instead of regularizing the weights. The
        # penalty now only updates self.l2_loss, to be added to the loss by
        # the caller.
        if self.l2_regularization > 0:
            self.l2_loss = self.l2_regularization * torch.norm(self.fc.weight)
        else:
            self.l2_loss = torch.zeros((), device=a.device)

        # Gate the input, then apply the non-linearity.
        final_rep = self.activation(z * a)

        return final_rep
