import torch
import torch.nn as nn
import torch.nn.functional as F
import math


# GELU activation (tanh approximation, Hendrycks & Gimpel 2016).
def gelu(x):
    """Return the tanh-approximated Gaussian Error Linear Unit of ``x``.

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    element-wise on a tensor.
    """
    coeff = math.sqrt(2.0 / math.pi)
    inner = coeff * (x + 0.044715 * x * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))

# Position-wise feed-forward sublayer (pre-norm, residual).
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward network with a residual connection.

    Applies LayerNorm first (pre-norm), then Linear -> GELU -> dropout ->
    Linear -> dropout, and adds the untouched input back.

    Args:
        d_model: input/output feature dimension.
        d_ff: hidden dimension of the inner linear layer.
        dropout: dropout probability applied after activation and projection.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        # Expansion and projection layers (order kept: affects seeded init).
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        # Pre-normalization of the residual branch input.
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        # Module-level gelu defined above in this file.
        self.actv = gelu
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Return feed_forward(layer_norm(x)) + x (same shape as ``x``)."""
        normed = self.layer_norm(x)
        hidden = self.dropout_1(self.actv(self.w_1(normed)))
        projected = self.w_2(hidden)
        return self.dropout_2(projected) + x


# 定义多头注意力模型
class MultiHeadedAttention(nn.Module):
    # 初始化函数，head_count为头数，model_dim为模型维度，dropout为dropout比例
    def __init__(self, head_count, model_dim, dropout=0.1):
        # 断言模型维度除以头数必须为0
        assert model_dim % head_count == 0
        # 每个头维度
        self.dim_per_head = model_dim // head_count
        # 模型维度
        self.model_dim = model_dim

        super(MultiHeadedAttention, self).__init__()
        # 头数
        self.head_count = head_count

        # 线性层，计算key，value，query
        self.linear_k = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.linear_v = nn.Linear(model_dim, head_count * self.dim_per_head)
        self.linear_q = nn.Linear(model_dim, head_count * self.dim_per_head)
        # softmax
        self.softmax = nn.Softmax(dim=-1)
        # dropout
        self.dropout = nn.Dropout(dropout)
        # 线性层，计算输出
        self.linear = nn.Linear(model_dim, model_dim)

    # 计算多头注意力
    def forward(self, key, value, query, mask=None):
        # batch_size
        batch_size = key.size(0)
        # 每个头维度
        dim_per_head = self.dim_per_head
        # 头数
        head_count = self.head_count

        # 计算形状
        def shape(x):
            """  projection """
            return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)

        # 计算形状
        def unshape(x):
            """  compute context """
            return x.transpose(1, 2).contiguous() \
                .view(batch_size, -1, head_count * dim_per_head)

        # 计算key，value，query
        key = self.linear_k(key).view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
        value = self.linear_v(value).view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
        query = self.linear_q(query).view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)

        # 计算query
        query = query / math.sqrt(dim_per_head)
        # 计算scores
        scores = torch.matmul(query, key.transpose(2, 3))

        # 如果有mask，则将scores中的mask部分置为-1e10
        if mask is not None:
            mask = mask.unsqueeze(1).expand_as(scores)
            scores = scores.masked_fill(mask, -1e10)

        # 计算attn
        attn = self.softmax(scores)
        # 计算drop_attn
        drop_attn = self.dropout(attn)
        # 计算context
        context = torch.matmul(drop_attn, value).transpose(1, 2).\
                    contiguous().view(batch_size, -1, head_count * dim_per_head)
        # 计算输出
        output = self.linear(context)
        return output


# Sinusoidal positional encoding with an additive speaker embedding.
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017).

    Precomputes a (1, max_len, dim) table of interleaved sin/cos values and
    adds the first ``seq_len`` rows, plus a speaker embedding, to the input.

    Args:
        dim: feature dimension of the encoding (assumed even here —
            odd ``dim`` would make the cos slice mismatch; TODO confirm
            callers always pass an even dim).
        max_len: maximum supported sequence length.
    """

    def __init__(self, dim, max_len=512):
        super(PositionalEncoding, self).__init__()
        table = torch.zeros(max_len, dim)
        # Column vector of positions 0..max_len-1.
        position = torch.arange(0, max_len).unsqueeze(1).float()
        # Inverse frequencies: 10000^(-2i/dim) for even indices i.
        inv_freq = torch.exp(torch.arange(0, dim, 2, dtype=torch.float) *
                             -(math.log(10000.0) / dim))
        angles = position * inv_freq
        # Even feature indices carry sin, odd indices carry cos.
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        # Registered as a buffer: moves with the module, not a parameter.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x, speaker_emb):
        """Return x + positional encoding + speaker_emb.

        Args:
            x: tensor of shape (batch, seq_len, dim).
            speaker_emb: speaker embedding broadcastable against ``x``.
        """
        seq_len = x.size(1)
        return x + self.pe[:, :seq_len] + speaker_emb


# One pre-norm Transformer encoder layer: multi-head attention + feed-forward.
class TransformerEncoderLayer(nn.Module):
    """A single Transformer encoder layer.

    Pre-norm variant: the query input is layer-normalized before attention on
    every layer except the first (iter == 0), where the embedding output is
    used as-is.

    Args:
        d_model: model (hidden) dimension.
        heads: number of attention heads.
        d_ff: inner dimension of the feed-forward sublayer.
        dropout: dropout probability.
    """

    def __init__(self, d_model, heads, d_ff, dropout):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, iter, inputs_a, inputs_b, mask):
        """Run one encoder layer.

        Args:
            iter: layer index; layer 0 skips the pre-norm.
            inputs_a: source of keys/values when attending across inputs.
            inputs_b: query input (also keys/values when equal to inputs_a).
            mask: mask tensor; truthy positions are suppressed in attention
                (it is ``unsqueeze(1)``-broadcast over the head dimension).

        Returns:
            Tensor of the same shape as ``inputs_b``.
        """
        # Decide self- vs cross-attention BEFORE normalizing: after
        # layer_norm(inputs_b) the equality with inputs_a would no longer hold.
        is_self_attention = inputs_a.equal(inputs_b)

        # The original duplicated this pre-norm (and a no-op
        # `inputs_b = inputs_b` else-branch) in both branches; hoisted here.
        if iter != 0:
            inputs_b = self.layer_norm(inputs_b)
        mask = mask.unsqueeze(1)

        if is_self_attention:
            context = self.self_attn(inputs_b, inputs_b, inputs_b, mask=mask)
        else:
            # Keys/values from inputs_a, queries from inputs_b.
            context = self.self_attn(inputs_a, inputs_a, inputs_b, mask=mask)

        # Residual around attention; feed_forward adds its own residual.
        out = self.dropout(context) + inputs_b
        return self.feed_forward(out)


# Stack of Transformer encoder layers with positional + speaker embeddings.
class TransformerEncoder(nn.Module):
    """Transformer encoder supporting self- or cross-input attention.

    If ``x_a`` equals ``x_b`` the stack runs plain self-attention on ``x_b``;
    otherwise each layer attends from queries ``x_b`` to keys/values ``x_a``.

    Args:
        d_model: model dimension.
        d_ff: feed-forward inner dimension.
        heads: number of attention heads.
        layers: number of stacked encoder layers.
        dropout: dropout probability.
    """

    def __init__(self, d_model, d_ff, heads, layers, dropout=0.1):
        super(TransformerEncoder, self).__init__()
        self.d_model = d_model
        self.layers = layers
        self.pos_emb = PositionalEncoding(d_model)
        self.transformer_inter = nn.ModuleList(
            [TransformerEncoderLayer(d_model, heads, d_ff, dropout)
             for _ in range(layers)])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x_a, x_b, mask, speaker_emb):
        """Encode ``x_b`` (optionally attending over ``x_a``).

        Args:
            x_a: key/value-side input, shape (batch, seq_len, d_model).
            x_b: query-side input, same shape; may be identical to ``x_a``.
            mask: padding mask; ``mask.eq(0)`` marks positions to suppress,
                so nonzero entries denote valid tokens.
            speaker_emb: speaker embedding added alongside positions.

        Returns:
            Encoded tensor with the shape of ``x_b``.
        """
        # Decide the mode on the raw inputs, before any transformation.
        is_self_attention = x_a.equal(x_b)

        # The original duplicated the embed+dropout code in both branches.
        # In self mode only x_b is embedded (x_a is ignored below), which
        # also keeps dropout's RNG consumption identical to the original.
        if not is_self_attention:
            x_a = self.dropout(self.pos_emb(x_a, speaker_emb))
        x_b = self.dropout(self.pos_emb(x_b, speaker_emb))

        for i in range(self.layers):
            # In self mode pass the *running* x_b as both arguments so each
            # layer sees equal tensors and takes its self-attention path.
            source = x_b if is_self_attention else x_a
            x_b = self.transformer_inter[i](i, source, x_b, mask.eq(0))
        return x_b