import torch
from torch import nn
from fastNLP.modules.torch import ConditionalRandomField, allowed_transitions
from data_loader import load_and_process
from fastNLP.embeddings.torch import StaticEmbedding
from fastNLP.core import seq_len_to_mask
import torch.nn.functional as F
from fastNLP import Vocabulary

class BiLSTM_CRF_CHAR(nn.Module):
    """BiLSTM-CRF sequence tagger with character-level LSTM features.

    Each token is represented by the concatenation of its word embedding and
    the last hidden state of a character-level BiLSTM run over the token's
    characters.  The concatenated features pass through a word-level BiLSTM,
    dropout, a linear projection to tag space, and a CRF layer that provides
    the training loss and Viterbi decoding.
    """

    def __init__(
        self,
        word_embedding_dim,
        char_embedding_dim,
        hidden_size,
        char_state_dim,
        char_vocab_size,
        word_vocab_size,
        num_classes,
        pretrain_embedding_path,
        dropout=0.5,
        num_layers=1,
        target_vocab=None,
    ) -> None:
        """
        :param word_embedding_dim: dimension of the word embeddings
        :param char_embedding_dim: dimension of the character embeddings
        :param hidden_size: hidden size of the word-level BiLSTM (per direction)
        :param char_state_dim: hidden size of the char-level BiLSTM (per direction)
        :param char_vocab_size: size of the character vocabulary
        :param word_vocab_size: size of the word vocabulary
        :param num_classes: number of target tags
        :param pretrain_embedding_path: if ``None``, word embeddings are trained
            from scratch; otherwise the senna embeddings are loaded.
            NOTE(review): the path value itself is ignored — the senna files are
            read from hard-coded locations in :meth:`load_senna_embedding`.
        :param dropout: dropout probability applied to the word-LSTM output
        :param num_layers: number of layers in each LSTM
        :param target_vocab: tag vocabulary; when given, it constrains the CRF
            to transitions allowed by the tagging scheme
        """
        super().__init__()

        self.char_embedding = nn.Embedding(char_vocab_size, char_embedding_dim)

        if pretrain_embedding_path is None:
            self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
        else:
            self.word_embedding = BiLSTM_CRF_CHAR.load_senna_embedding()

        self.char_lstm = nn.LSTM(
            char_embedding_dim,
            char_state_dim,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        # Word-LSTM input = word embedding concatenated with the bidirectional
        # char-LSTM summary (2 * char_state_dim).
        self.word_lstm = nn.LSTM(
            word_embedding_dim + 2 * char_state_dim,
            hidden_size,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(2 * hidden_size, num_classes)

        trans = None
        if target_vocab is not None:
            assert (
                len(target_vocab) == num_classes
            ), "The number of classes should be same with the length of target vocabulary."
            trans = allowed_transitions(target_vocab.idx2word, include_start_end=True)
        self.crf = ConditionalRandomField(
            num_classes, include_start_end_trans=True, allowed_transitions=trans
        )

    @staticmethod
    def load_senna_embedding():
        """Build an ``nn.Embedding`` initialised with the senna vectors.

        Reads the word list and the 50-dimensional vectors from hard-coded
        file paths and copies them into a trainable embedding layer.

        :return: ``nn.Embedding`` of shape ``[len(senna_vocab), 50]``
        """
        vocab = Vocabulary(padding=None, unknown=None)
        with open(
            "/home/wangxiaoli/datasets/embeddings/senna_embeddings/words.lst", "r"
        ) as f:
            words = [line.strip() for line in f.readlines()]
            vocab.add_word_lst(words)

        embedding = nn.Embedding(len(vocab), 50)

        with open(
            "/home/wangxiaoli/datasets/embeddings/senna_embeddings/embeddings.txt", "r"
        ) as f:
            rows = [[float(num) for num in line.split()] for line in f.readlines()]
            embedding.weight.data.copy_(torch.tensor(rows))
        return embedding

    def forward(
        self,
        words: torch.LongTensor,
        chars_index: torch.LongTensor,
        target: torch.LongTensor = None,
        seq_len: torch.LongTensor = None,
    ):
        """
        :param words: word indices, shape ``[batch_size, seq_len]``
        :param chars_index: character indices per token,
            shape ``[batch_size, seq_len, char_len]`` — TODO confirm against caller
        :param target: gold tag indices, or ``None`` at inference time
        :param seq_len: true sentence lengths, shape ``[batch,]``
        :return: ``{'pred': ...}`` when ``target`` is ``None``, else ``{'loss': ...}``
        """
        char_embedding = self.char_embedding(chars_index)
        word_embedding = self.word_embedding(words)

        # Run the char BiLSTM once per token position and keep the last time
        # step as the token's character summary.  Collect the summaries in a
        # list and concatenate once — repeated torch.cat in the loop is O(n^2).
        char_features = []
        for i in range(char_embedding.shape[1]):
            char_lstm_out, _ = self.char_lstm(char_embedding[:, i, :, :])
            char_features.append(char_lstm_out[:, -1, :].unsqueeze(dim=1))
        char_feature = torch.cat(char_features, dim=1)

        word_embedding = torch.cat((word_embedding, char_feature), dim=-1)
        word_lstm_out, _ = self.word_lstm(word_embedding)

        # Dropout is applied to the LSTM features *before* the tag projection;
        # applying it after `fc` (as the previous version did) randomly zeroes
        # individual tag scores, which distorts the CRF loss during training.
        feats = self.fc(self.dropout(word_lstm_out))

        # log_softmax is a per-position constant shift of the scores, which the
        # CRF normalisation cancels out — kept for numerical stability.
        logits = F.log_softmax(feats, dim=-1)
        mask = seq_len_to_mask(seq_len)

        if target is None:
            pred, _ = self.crf.viterbi_decode(logits, mask)
            return {"pred": pred}
        else:
            loss = self.crf(logits, target, mask).mean()
            return {"loss": loss}

    def train_step(
        self,
        words: torch.LongTensor,
        chars_index: torch.LongTensor,
        target: torch.LongTensor = None,
        seq_len: torch.LongTensor = None,
    ):
        """
        :param words: word indices, shape ``[batch_size, seq_len]``
        :param chars_index: character indices, shape ``[batch_size, seq_len, char_len]``
        :param target: gold tag indices for each sample
        :param seq_len: sentence lengths, shape ``[batch,]``
        :return: ``{'pred': torch.Tensor}`` if ``target`` is ``None``,
            otherwise ``{'loss': torch.Tensor}``
        """
        return self(words, chars_index, target, seq_len)

    def evaluate_step(
        self,
        words: torch.LongTensor,
        chars_index: torch.LongTensor,
        seq_len: torch.LongTensor = None,
    ):
        """
        :param words: word indices, shape ``[batch_size, seq_len]``
        :param chars_index: character indices, shape ``[batch_size, seq_len, char_len]``
        :param seq_len: sentence lengths, shape ``[batch,]``
        :return: predictions ``{'pred': torch.Tensor}``
        """
        return self(words, chars_index, None, seq_len)


if __name__ == "__main__":
    # Smoke-construct the model from the processed data bundle.
    databundle = load_and_process()
    word_vocab_size = len(databundle.get_vocab("words"))
    char_vocab_size = len(databundle.get_vocab("chars"))
    target_vocab = databundle.get_vocab("target")

    # First training sample, kept around for manual inspection/debugging.
    sentence = databundle.get_dataset("train")[0]

    model = BiLSTM_CRF_CHAR(
        word_embedding_dim=50,
        char_embedding_dim=50,
        hidden_size=100,
        char_state_dim=25,
        char_vocab_size=char_vocab_size,
        word_vocab_size=word_vocab_size,
        num_classes=len(target_vocab),
        pretrain_embedding_path=None,
        dropout=0.5,
        num_layers=1,
        target_vocab=target_vocab,
    )
