# coding: utf-8
"""

"""
import torch
import torch.nn as nn
from data_loader import *
config = Config()

class BiLSTM(nn.Module):
    """Bidirectional LSTM encoder for sequence tagging.

    Embeds token ids, runs a single-layer BiLSTM, zeroes out padded
    timesteps, applies dropout, and projects each timestep to per-tag
    scores (emission scores, suitable for feeding a CRF or a softmax).
    """

    def __init__(self, embedding_dim, hidden_dim, dropout, word2id, tag2id):
        """
        Args:
            embedding_dim: size of each token embedding vector.
            hidden_dim: requested total BiLSTM output width; each direction
                gets ``hidden_dim // 2`` hidden units.
            dropout: dropout probability applied to the masked LSTM outputs.
            word2id: token -> id mapping; ids are assumed contiguous, with one
                extra slot reserved (vocab_size = len(word2id) + 1), presumably
                for padding — TODO confirm against data_loader.
            tag2id: tag -> id mapping; its size is the output dimension.
        """
        super().__init__()
        self.name = "BiLSTM"
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = len(word2id) + 1
        self.tag_to_id = tag2id
        self.tag_size = len(tag2id)

        # Embedding layer: vocab_size ids -> embedding_dim-dimensional vectors.
        self.word_embedding = nn.Embedding(self.vocab_size, self.embedding_dim)
        self.dropout = nn.Dropout(dropout)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            bidirectional=True, batch_first=True)
        # BUGFIX: a bidirectional LSTM emits 2 * (hidden_dim // 2) features per
        # timestep, which is NOT hidden_dim when hidden_dim is odd; the original
        # nn.Linear(hidden_dim, ...) crashed at runtime in that case. Sizing the
        # projection from the actual LSTM output width works for any hidden_dim.
        lstm_out_dim = 2 * (hidden_dim // 2)
        self.hidden2tag = nn.Linear(lstm_out_dim, self.tag_size)

    def forward(self, x, mask):
        """Compute per-token tag scores.

        Args:
            x: LongTensor of token ids, shape [batch_size, seq_len].
            mask: tensor of shape [batch_size, seq_len]; nonzero for real
                tokens, zero for padding (bool or numeric).

        Returns:
            FloatTensor of tag scores, shape [batch_size, seq_len, tag_size].
        """
        embedding = self.word_embedding(x)   # [B, T, embedding_dim]
        outputs, _ = self.lstm(embedding)    # [B, T, 2 * (hidden_dim // 2)]
        # Zero padded timesteps; cast so a bool/int mask multiplies cleanly.
        outputs = outputs * mask.unsqueeze(-1).to(outputs.dtype)
        outputs = self.dropout(outputs)
        return self.hidden2tag(outputs)      # [B, T, tag_size]


if __name__ == '__main__':
    # Smoke test: build the model from the global config and push a single
    # training batch through it, printing the output shape.
    model = BiLSTM(
        config.embedding_dim,
        config.hidden_dim,
        config.dropout,
        word2id,
        config.tag2id,
    )
    train_dataloader, dev_dataloader = get_data()
    for input_ids_padded, label_padded, attention_mask in train_dataloader:
        print(model(input_ids_padded, attention_mask).shape)
        break
