import torch
import torch.nn as nn


class BiLSTM(nn.Module):
    """(Bi)LSTM token classifier for sequence tagging.

    Embeds token indices, runs them through a (bi)directional LSTM, and
    projects each timestep onto tag-class scores. Training loss is a
    cross-entropy that ignores padded positions.
    """

    def __init__(self, embedding_num, hidden_num, corpus_num, class_num, pad_index, bi=True):
        """
        Args:
            embedding_num: word-embedding dimensionality
            hidden_num: LSTM hidden-state size
            corpus_num: vocabulary size
            class_num: number of tag classes
            pad_index: padding tag index ignored by the loss
            bi: whether the LSTM is bidirectional
        """
        super().__init__()
        self.embedding_num = embedding_num
        self.hidden_num = hidden_num
        self.corpus_num = corpus_num
        self.bi = bi

        self.embedding = nn.Embedding(corpus_num, embedding_num)
        self.lstm = nn.LSTM(embedding_num, hidden_num, batch_first=True, bidirectional=bi)

        # A bidirectional LSTM concatenates forward and backward states,
        # doubling the feature size seen by the classifier head.
        out_dim = hidden_num * 2 if bi else hidden_num
        self.classifier = nn.Linear(out_dim, class_num)

        self.cross_loss = nn.CrossEntropyLoss(ignore_index=pad_index)  # skips padded targets

    def forward(self, data_idx, data_len, tag_idx=None, pack_pro=True):
        """Run the tagger over a padded batch.

        Args:
            data_idx: (batch, seq_len) LongTensor of token indices.
            data_len: per-sequence true lengths (tensor or list).
            tag_idx: optional (batch, seq_len) LongTensor of gold tags.
            pack_pro: if True, pack the padded batch before the LSTM so
                padding does not pollute the hidden states.

        Returns:
            Scalar cross-entropy loss when ``tag_idx`` is given, otherwise
            ``None``; predictions are exposed via ``self.pre`` either way.

        Side effects:
            ``self.pre`` is set to the flattened argmax tag predictions.
        """
        embedding = self.embedding(data_idx)
        if pack_pro:
            # pack_padded_sequence requires lengths on the CPU (torch >= 1.2);
            # a CUDA lengths tensor would raise.
            lengths = data_len.cpu() if torch.is_tensor(data_len) else data_len
            # enforce_sorted=False: fix — the original call required batches
            # to be pre-sorted by length descending and raised otherwise.
            # Results for already-sorted batches are unchanged.
            pack = nn.utils.rnn.pack_padded_sequence(
                embedding, lengths, batch_first=True, enforce_sorted=False
            )
            output, _ = self.lstm(pack)
            # NOTE(review): pad_packed_sequence pads only up to the longest
            # real length in the batch; tag_idx is assumed to share that time
            # dimension — confirm in the caller.
            output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        else:
            output, _ = self.lstm(embedding)

        pre = self.classifier(output)  # (batch, seq_len, class_num) scores

        # Flattened predicted tag indices, read by inference callers.
        self.pre = torch.argmax(pre, dim=-1).reshape(-1)

        if tag_idx is not None:
            loss = self.cross_loss(pre.reshape(-1, pre.shape[-1]), tag_idx.reshape(-1))
            return loss

