from torch import nn
import torch
from fastNLP.modules.torch import ConditionalRandomField, allowed_transitions
import torch.nn.functional as F
from fastNLP.core import seq_len_to_mask
import sys
import os

sys.path.append("../../")
from utils import load_senna_embedding

from bilstm_crf_with_char.data_loader import load_and_process


class BiLSTM_CNN_CRF(nn.Module):
    """BiLSTM-CNN-CRF sequence labeler (Ma & Hovy 2016 style).

    A character-level CNN produces a fixed-size feature per word, which is
    concatenated with the word embedding, fed through a bidirectional LSTM,
    and scored by a CRF layer.
    """

    def __init__(
        self,
        char_embedding_dim,
        word_embedding_dim,
        char_vocab_size,
        word_vocab_size,
        window_size,
        filters,
        hidden_size,
        target_vocab=None,
        num_classes=None,
        dropout=0.5,
        pretrain_embedding_path=None,
    ) -> None:
        """
        :param char_embedding_dim: dimension of the character embeddings
        :param word_embedding_dim: dimension of the word embeddings
        :param char_vocab_size: number of distinct characters
        :param word_vocab_size: number of distinct words
        :param window_size: CNN convolution window (kernel height over chars)
        :param filters: number of CNN output channels = per-word char feature size
        :param hidden_size: LSTM hidden size per direction
        :param target_vocab: fastNLP vocabulary of tags; used to constrain CRF transitions
        :param num_classes: number of output tags (must equal len(target_vocab) if given)
        :param dropout: dropout probability applied to embeddings and FC output
        :param pretrain_embedding_path: if not None, load pretrained senna word embeddings
        """
        super().__init__()
        self.max_char_len = 30
        # Kept so forward() can size the char feature buffer correctly.
        self.filters = filters
        self.char_embedding = nn.Embedding(char_vocab_size, char_embedding_dim)

        # BUG FIX: the original code unconditionally re-created
        # self.word_embedding after this branch, discarding the pretrained
        # weights loaded by load_senna_embedding().
        if pretrain_embedding_path is None:
            self.word_embedding = nn.Embedding(word_vocab_size, word_embedding_dim)
        else:
            self.word_embedding = load_senna_embedding()

        self.cnn = nn.Conv2d(1, filters, (window_size, char_embedding_dim))
        # NOTE(review): the pool size assumes a height padding of 2 on the conv
        # input, but self.cnn is created without padding and forward() uses
        # torch.max instead of this layer — kept only for compatibility.
        self.maxpool = nn.MaxPool2d((self.max_char_len + 2 * 2 - window_size + 1, 1))
        # BUG FIX: the LSTM consumes word embedding + CNN char feature, whose
        # size is `filters`, not `char_embedding_dim` (they only coincided in
        # the demo because both were 30).
        self.bilstm = nn.LSTM(
            word_embedding_dim + filters,
            hidden_size,
            bidirectional=True,
            batch_first=True,
        )
        self.fc = nn.Linear(2 * hidden_size, num_classes)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

        trans = None
        if target_vocab is not None:
            assert (
                len(target_vocab) == num_classes
            ), "The number of classes should be same with the length of target vocabulary."
            trans = allowed_transitions(target_vocab.idx2word, include_start_end=True)
        self.crf = ConditionalRandomField(
            num_classes, include_start_end_trans=True, allowed_transitions=trans
        )

    def forward(self, words, chars_index, target, seq_len):
        """Run the model; return ``{'loss': ...}`` when ``target`` is given,
        otherwise ``{'pred': ...}`` from CRF Viterbi decoding.
        """
        # (batch, seq_len, char_len, char_embedding_dim)
        char_embed = self.char_embedding(chars_index)
        char_embed = self.dropout(char_embed)

        # Per-word character representation: (batch, seq_len, filters).
        # BUG FIX: allocate on the input's device (was hard-coded "cuda:1")
        # and size the feature dim with self.filters (was a hard-coded 30).
        char_repr = torch.zeros(
            (char_embed.shape[0], char_embed.shape[1], self.filters),
            device=char_embed.device,
        )
        for i in range(char_embed.shape[1]):
            # (batch, 1, char_len, char_emb) -> conv -> (batch, filters, h_out, 1);
            # max over char positions replaces the MaxPool2d layer,
            # yielding (batch, filters).
            char_repr[:, i, :] = torch.max(
                self.relu(self.cnn(char_embed[:, i, :, :].unsqueeze(1))).squeeze(-1),
                dim=-1,
            )[0]

        word_embed = self.word_embedding(words)
        word_embed = self.dropout(word_embed)

        # (batch, seq_len, word_embedding_dim + filters)
        resp = torch.cat((word_embed, char_repr), dim=-1)
        word_lstm_out, _ = self.bilstm(resp)

        feats = self.fc(word_lstm_out)
        feats = self.dropout(feats)

        logits = F.log_softmax(feats, dim=-1)
        mask = seq_len_to_mask(seq_len)

        if target is None:
            pred, _ = self.crf.viterbi_decode(logits, mask)
            return {"pred": pred}
        else:
            loss = self.crf(logits, target, mask).mean()
            return {"loss": loss}

    def train_step(
        self,
        words: torch.LongTensor,
        chars_index: torch.LongTensor,
        target: torch.LongTensor = None,
        seq_len: torch.LongTensor = None,
    ):
        """
        :param words: word indices of the sentence, shape ``[batch_size, seq_len]``
        :param chars_index: char indices per word, shape ``[batch_size, seq_len, char_len]``
        :param target: gold tag indices for each sample
        :param seq_len: length of each sentence, shape ``[batch,]``
        :return: ``{'pred': torch.Tensor}`` if ``target`` is ``None``, else ``{'loss': torch.Tensor}``
        """
        return self(words, chars_index, target, seq_len)

    def evaluate_step(
        self,
        words: torch.LongTensor,
        chars_index: torch.LongTensor,
        seq_len: torch.LongTensor = None,
    ):
        """
        :param words: word indices of the sentence, shape ``[batch_size, seq_len]``
        :param chars_index: char indices per word, shape ``[batch_size, seq_len, char_len]``
        :param seq_len: length of each sentence, shape ``[batch,]``
        :return: predictions ``{'pred': torch.Tensor}``
        """
        return self(words, chars_index, None, seq_len)


if __name__ == "__main__":
    # Smoke test: push one training sentence (duplicated into a batch of
    # two) through the model and print the resulting loss dict.
    databundle = load_and_process()
    target_vocab = databundle.get_vocab("target")

    sentence = databundle.get_dataset("train")[0]

    model = BiLSTM_CNN_CRF(
        word_embedding_dim=50,
        char_embedding_dim=30,
        hidden_size=200,
        filters=30,
        window_size=3,
        word_vocab_size=len(databundle.get_vocab("words")),
        char_vocab_size=len(databundle.get_vocab("chars")),
        target_vocab=target_vocab,
        num_classes=len(target_vocab),
    )

    # Turn each word's list of char indices into a tensor, then pad all
    # words of the sentence to a common character length.
    for idx, word_chars in enumerate(sentence["chars_index"]):
        sentence["chars_index"][idx] = torch.tensor(word_chars)
    sentence["chars_index"] = nn.utils.rnn.pad_sequence(
        sentence["chars_index"], batch_first=True
    )

    # Duplicate along a new batch dimension -> batch_size of 2.
    char_index = sentence["chars_index"].unsqueeze(0)
    char_index = torch.cat((char_index, char_index), dim=0)

    print(
        model(
            torch.tensor([sentence["words"], sentence["words"]]),
            char_index,
            torch.tensor([sentence["target"], sentence["target"]]),
            torch.LongTensor([sentence["seq_len"], sentence["seq_len"]]),
        )
    )