import torch

from models.encoder_model._base import EncoderModule
from utils import Params
import torch.nn as nn

class BiLSTMEncoderModule(EncoderModule):
    """Bidirectional-LSTM sequence encoder.

    Takes per-token features of shape [N, T, E] plus a 0/1 padding mask of
    shape [N, T], runs them through a (packed) BiLSTM, optionally layer-norms
    the result, and projects to ``encoder_output_size``. Padded time steps are
    zeroed before the projection so they contribute nothing downstream.
    """

    def __init__(self, params: Params):
        super(BiLSTMEncoderModule, self).__init__(params=params)
        self.lstm_layer = nn.LSTM(
            input_size=self.params.config.hidden_size,
            hidden_size=self.params.encoder_lstm_hidden_size,
            num_layers=self.params.encoder_lstm_layers,
            # Dropout is pointless on a single-layer LSTM (PyTorch also warns),
            # so disable it in that case.
            dropout=0.0 if self.params.encoder_lstm_layers == 1 else self.params.encoder_lstm_dropout,
            bidirectional=True,
            batch_first=False
        )
        # Bidirectional: forward and backward hidden states are concatenated.
        lstm_output_size = self.params.encoder_lstm_hidden_size * 2
        # Optional LayerNorm; nn.Identity() simply passes values through.
        self.lstm_norm_layer = nn.LayerNorm(lstm_output_size) if self.params.encoder_lstm_with_ln else nn.Identity()
        # bias=False so positions zeroed by the mask stay exactly zero after
        # the linear projection.
        self.fc_layer = nn.Linear(lstm_output_size, self.params.encoder_output_size, bias=False)

    def forward(self, input_feature, input_mask, **kwargs):
        """Encode a batch of padded sequences.

        Args:
            input_feature: float tensor [N, T, E] of token features.
            input_mask: 0/1 tensor [N, T]; 1 marks real tokens, 0 padding.
            **kwargs: ignored; accepted for interface compatibility.

        Returns:
            Tensor [N, T, encoder_output_size] with padded positions zeroed
            (see NOTE below regarding the LayerNorm case).
        """
        input_feature = torch.permute(input_feature, dims=[1, 0, 2])  # [N,T,E] -> [T,N,E]
        input_mask = torch.permute(input_mask, dims=[1, 0])  # [N,T] -> [T,N]
        input_mask_weights = input_mask.unsqueeze(-1).to(input_feature.dtype)
        max_len, _ = input_mask.size()

        # Pack so the LSTM skips padded steps. pack_padded_sequence defaults
        # to batch_first=False, matching the [T,N,E] layout above.
        # FIX: since PyTorch 1.7 `lengths` must be a CPU tensor — move it
        # there explicitly so this works when inputs live on GPU.
        lengths = input_mask.sum(0).long().cpu()
        embed = nn.utils.rnn.pack_padded_sequence(input_feature, lengths, enforce_sorted=False)
        lstm_output, _ = self.lstm_layer(embed)  # [T,N,hidden_size*2] (bidirectional doubles the width)
        lstm_output, _ = nn.utils.rnn.pad_packed_sequence(lstm_output, total_length=max_len)  # [T,N,hidden_size*2]
        # Zero out padded positions.
        lstm_output = lstm_output * input_mask_weights

        # NOTE(review): with encoder_lstm_with_ln enabled, LayerNorm maps the
        # all-zero pad rows to its bias term, so pads are no longer exactly
        # zero after the projection — confirm downstream masks pads itself.
        lstm_output = self.lstm_norm_layer(lstm_output)

        # Fuse BiLSTM features down to the encoder output size.
        encoder_feature = self.fc_layer(lstm_output)
        if self.fc_layer.bias is not None:
            # Defensive: if a bias is ever enabled, re-zero padded positions.
            encoder_feature = encoder_feature * input_mask_weights

        encoder_feature = torch.permute(encoder_feature, dims=[1, 0, 2])  # back to [N,T,E] layout
        return encoder_feature  # [N, T, encoder_output_size]

if __name__ == '__main__':
    # Placeholder entry point — no standalone behavior; import the module instead.
    pass
