import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence


class BiLSTM(nn.Module):
    """Bidirectional-LSTM encoder producing per-token emission scores.

    Typically used as the emission layer in front of a CRF for sequence
    labeling (e.g. NER). ``forward`` expects sequence-first padded batches;
    ``predict`` handles a single unbatched sentence.
    """

    def __init__(self, vocab_size, label_num):
        super(BiLSTM, self).__init__()

        # Token embedding: ids -> 256-dim vectors.
        self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=256)

        # torch has no dedicated BiLSTM module; use LSTM with
        # bidirectional=True. Output feature size is 2 * 512 = 1024.
        self.blstm = nn.LSTM(
            input_size=256,
            hidden_size=512,
            bidirectional=True,
            num_layers=1,
        )

        # Projects the 1024-dim LSTM features to per-label emission scores.
        self.linear = nn.Linear(in_features=1024, out_features=label_num)

    def forward(self, inputs, length):
        """Return one (true_len, label_num) emission matrix per sentence.

        Args:
            inputs: LongTensor of padded token ids, shape (seq_len, batch)
                (sequence-first, matching pack_padded_sequence's default
                batch_first=False).
            length: true length of each sentence; with the default
                enforce_sorted=True these must be sorted descending —
                TODO confirm callers guarantee this.

        Returns:
            list of tensors, one per sentence, each trimmed to its true
            length: shape (length[i], label_num).
        """
        # Embed: (seq_len, batch) -> (seq_len, batch, 256).
        outputs_embed = self.embed(inputs)

        # Pack so the LSTM skips padded positions.
        outputs_packd = pack_padded_sequence(outputs_embed, length)

        outputs_blstm, (hn, cn) = self.blstm(outputs_packd)

        # Unpack back to a padded tensor: (seq_len, batch, 1024).
        outputs_paded, outputs_lengths = pad_packed_sequence(outputs_blstm)

        # Move batch to dim 0: (batch, seq_len, 1024).
        outputs_paded = outputs_paded.transpose(0, 1)

        # BUG FIX: original called self.double(outputs_paded) —
        # nn.Module.double() takes no input tensor (it converts module
        # parameters to float64) and would raise TypeError here. The linear
        # projection was intended, as in predict().
        outputs_logits = self.linear(outputs_paded)

        # Trim each sentence's emissions back to its true length.
        outputs = []
        for outputs_logit, outputs_length in zip(outputs_logits, outputs_lengths):
            outputs.append(outputs_logit[:outputs_length])

        return outputs

    def predict(self, inputs):
        """Emission scores for one unbatched sentence.

        Args:
            inputs: LongTensor of token ids, shape (seq_len,).

        Returns:
            Tensor of shape (seq_len, label_num).
        """
        output_embed = self.embed(inputs)

        # Add a batch dimension of size 1: (seq_len, 1, 256).
        output_embed = output_embed.unsqueeze(1)

        output_blstm, (hn, cn) = self.blstm(output_embed)

        # Drop the batch dimension: (seq_len, 1024).
        output_blstm = output_blstm.squeeze(1)

        output_linear = self.linear(output_blstm)

        return output_linear

if __name__ == '__main__':
    # Character-to-index vocabulary for a small demo sentence.
    char_to_id = {"双": 0, "肺": 1, "见": 2, "多": 3, "发": 4, "斑": 5, "片": 6,
                  "状": 7, "稍": 8, "高": 9, "密": 10, "度": 11, "影": 12, "。": 13}

    # Label-to-index mapping (BIO tags: dis / sym spans plus O).
    tag_to_id = {"O": 0, "B-dis": 1, "I-dis": 2, "B-sym": 3, "I-sym": 4}

    model = BiLSTM(
        vocab_size=len(char_to_id),
        label_num=len(tag_to_id),
    )
    print(model)