"""
siamese
"""
import torch
import torch.nn as nn

import config


class SiameseNetWork(nn.Module):
    """ESIM-style siamese matcher for question / similar-question pairs.

    Pipeline: embed each side with its own vocabulary, encode both with a
    shared bidirectional GRU, soft-align the two sequences against each
    other, compose the [encoding, alignment] concatenation with a second
    GRU, pool over time, and classify the joined features into 2 classes.
    """

    def __init__(self):
        super(SiameseNetWork, self).__init__()
        # The two sides use different word-segmentation vocabularies,
        # so each gets its own embedding table (padding rows stay zero).
        self.embedding_q = nn.Embedding(num_embeddings=len(config.sort_q_ws_model),
                                        embedding_dim=config.sort_embedding_dim,
                                        padding_idx=config.sort_q_ws_model.PAD)
        self.embedding_sim_q = nn.Embedding(num_embeddings=len(config.sort_sim_q_ws_model),
                                            embedding_dim=config.sort_embedding_dim,
                                            padding_idx=config.sort_sim_q_ws_model.PAD)
        # Shared encoder applied to both sides (the "siamese" part).
        # Bidirectional, so its output feature size is hidden_size * 2.
        self.gru1 = nn.GRU(input_size=config.sort_embedding_dim,
                           hidden_size=config.sort_hidden_size,
                           num_layers=config.sort_num_layers,
                           batch_first=True, bidirectional=True)
        # Composition encoder over [encoding (2h), aligned context (2h)] = 4h.
        self.gru2 = nn.GRU(input_size=config.sort_hidden_size * 4,
                           hidden_size=config.sort_hidden_size,
                           num_layers=config.sort_num_layers,
                           batch_first=True)
        # Classifier head; input is both sides' pooled features (2h + 2h).
        self.dnn = nn.Sequential(
            nn.Linear(in_features=config.sort_hidden_size * 4,
                      out_features=config.sort_hidden_size),
            nn.ELU(inplace=True),
            nn.BatchNorm1d(config.sort_hidden_size),
            nn.Dropout(config.sort_drop_out),

            nn.Linear(config.sort_hidden_size, config.sort_hidden_size),
            nn.ELU(inplace=True),
            nn.BatchNorm1d(config.sort_hidden_size),
            nn.Dropout(config.sort_drop_out),

            nn.Linear(config.sort_hidden_size, 2)
        )

    def forward(self, input1, input2):
        """Score one question/candidate pair per batch row.

        input1: [batch_size, max_len] token ids for the query side
        input2: [batch_size, max_len] token ids for the candidate side
        returns: [batch_size, 2] softmax probabilities
        """
        # Boolean PAD masks (True where the token is padding); consumed by
        # the attention step so padded positions receive zero weight.
        pad_mask_q = input1.eq(config.sort_q_ws_model.PAD)
        pad_mask_sim = input2.eq(config.sort_sim_q_ws_model.PAD)

        # Embed: ids -> [batch_size, max_len, embedding_dim]
        emb_q = self.embedding_q(input1)
        emb_sim = self.embedding_sim_q(input2)

        # Encode both sides with the shared BiGRU.
        # enc_*: [batch_size, max_len, hidden_size * 2]; final hidden states
        # are not needed, only the per-token outputs.
        enc_q, _ = self.gru1(emb_q)
        enc_sim, _ = self.gru1(emb_sim)

        # Soft cross-attention: each token gathers a context vector from the
        # other sequence. align_*: [batch_size, max_len, hidden_size * 2]
        align_q, align_sim = self.sort_attention_align(
            enc_q, enc_sim, pad_mask_q, pad_mask_sim)

        # Compose encoding + aligned context (4h) with the second GRU (-> h).
        comp_q, _ = self.gru2(torch.cat([enc_q, align_q], dim=-1))
        comp_sim, _ = self.gru2(torch.cat([enc_sim, align_sim], dim=-1))

        # Pool each side over time (avg + max -> 2h), join (-> 4h), classify.
        features = torch.cat(
            [self.apply_pooling(comp_q), self.apply_pooling(comp_sim)], dim=-1)

        # [batch_size, 2] class probabilities.
        return nn.functional.softmax(self.dnn(features), dim=-1)

    @staticmethod
    def apply_pooling(x):
        """Average- and max-pool over the time axis and concatenate.

        x: [batch_size, max_len, hidden_size]
        returns: [batch_size, hidden_size * 2] (avg features, then max)
        """
        seq_len = x.size(1)
        # Pooling ops expect channels-first: [batch_size, hidden_size, max_len]
        channels_first = x.transpose(1, 2)
        # Kernel spans the whole sequence, so each pool collapses time to 1,
        # and squeeze(-1) drops that singleton dim.
        mean_pooled = nn.functional.avg_pool1d(channels_first, seq_len).squeeze(-1)
        max_pooled = nn.functional.max_pool1d(channels_first, seq_len).squeeze(-1)
        return torch.cat([mean_pooled, max_pooled], dim=-1)

    @staticmethod
    def sort_attention_align(x1, x2, mask1, mask2):
        """Soft-align two encoded sequences against each other.

        x1, x2: [batch_size, max_len, hidden_size * 2]
        mask1, mask2: [batch_size, max_len] bool, True at PAD positions
        returns: (x1_align, x2_align), each the attention-weighted context of
        the *other* sequence, shaped like its input.
        """
        # Build additive masks: 0 at real tokens, -inf at PAD, so that after
        # the softmax padded positions contribute exactly zero weight.
        neg_inf = float("-inf")
        add_mask1 = mask1.float().masked_fill_(mask1, neg_inf)
        add_mask2 = mask2.float().masked_fill_(mask2, neg_inf)

        # Pairwise dot-product scores: [batch_size, max_len1, max_len2]
        scores = x1.bmm(x2.transpose(1, 2))

        # x1 attends over x2: mask x2's PAD columns, normalize, mix x2 rows.
        # weights_1to2: [batch_size, max_len1, max_len2]
        weights_1to2 = nn.functional.softmax(scores + add_mask2.unsqueeze(1), dim=-1)
        x1_align = weights_1to2.bmm(x2)

        # x2 attends over x1, using the transposed score matrix.
        weights_2to1 = nn.functional.softmax(
            scores.transpose(1, 2) + add_mask1.unsqueeze(1), dim=-1)
        x2_align = weights_2to1.bmm(x1)

        return x1_align, x2_align
