import torch
import torch.nn as nn
from transformers import BertModel
from fastNLP import Vocabulary
from loguru import logger

from fastNLP.embeddings.torch import CNNCharEmbedding, LSTMCharEmbedding
from model.config_layer import config_semantic
from transformers import BertModel
import sys

sys.path.append("./")
from utils.data import Data
from utils.alphabet import Alphabet


def alphabet2vocab(alphabet: Alphabet) -> Vocabulary:
    """Convert a project ``Alphabet`` into a fastNLP ``Vocabulary``.

    Only the word strings are carried over; the alphabet's own indices are
    discarded, since ``Vocabulary`` assigns its own ids via ``add_word``.

    :param alphabet: source alphabet exposing ``iteritems()`` -> (word, idx)
    :return: a fresh ``Vocabulary`` containing every word of *alphabet*
    """
    vocab = Vocabulary()
    # idx from the alphabet is intentionally ignored (see docstring).
    for word, _ in alphabet.iteritems():
        vocab.add_word(word)
    return vocab


class SemanticView(nn.Module):
    """Semantic feature extractor for character sequences.

    Concatenates up to three per-position features, each toggled by
    ``config_semantic``:

    * BERT contextual embeddings (``use_bert``),
    * a char-level LSTM embedding (``use_char``),
    * a bigram-level CNN embedding (``use_biword``).

    ``self.hidden_dim`` accumulates the width of the concatenated output.
    """

    def __init__(self, data: Data):
        """Build the enabled sub-modules from ``config_semantic`` and *data*.

        :param data: project data holder providing ``word_alphabet`` and
            ``biword_alphabet`` (used to build fastNLP vocabularies).
        """
        super(SemanticView, self).__init__()

        logger.info("   +SemanticView Feature")

        # Feature switches and embedding sizes from the layer config.
        self.use_char = config_semantic["use_char"]
        self.char_embed_size = config_semantic["char_embed_size"]
        self.use_biword = config_semantic["use_biword"]
        self.biword_embed_size = config_semantic["biword_embed_size"]

        # Total width of the concatenated output feature (grown below).
        self.hidden_dim = 0
        self.pooler_dim = config_semantic["pooler_dim"]

        self.use_bert = config_semantic["use_bert"]
        self.bert_path = config_semantic["bert_path"]

        if self.use_char:
            logger.info("       +char LSTM..")
            self.hidden_dim += self.char_embed_size

            self.char_vocab = alphabet2vocab(data.word_alphabet)
            self.char_feature = LSTMCharEmbedding(
                self.char_vocab, embed_size=self.char_embed_size, min_char_freq=1
            )

        if self.use_biword:
            logger.info("           +bichar CNN..")
            self.hidden_dim += self.biword_embed_size

            self.biword_vocab = alphabet2vocab(data.biword_alphabet)
            self.biword_feature = CNNCharEmbedding(
                self.biword_vocab, embed_size=self.biword_embed_size, min_char_freq=1
            )

        if self.use_bert:
            logger.info("           +BERT..")
            # NOTE: self.bert only exists when use_bert is enabled; all other
            # methods must guard access to it.
            self.bert = BertModel.from_pretrained(self.bert_path)
            self.hidden_dim += self.bert.config.hidden_size

        # self.pooler = nn.Sequential(
        #     nn.Dropout(),
        #     nn.Linear(self.hidden_dim, self.pooler_dim)
        # )

        # self.freeze_bert()

    def freeze_bert(self):
        """Disable gradient updates for all BERT parameters.

        Only valid when ``use_bert`` is enabled — ``self.bert`` does not
        exist otherwise.
        """
        for p in self.bert.parameters():
            p.requires_grad = False

    def to_device(self, device="cuda:0"):
        """Move the *enabled* sub-modules onto *device*.

        Bug fixes vs. the original: ``self.bert`` was moved unconditionally
        (AttributeError when ``use_bert`` is off), and a nonexistent
        ``self.dropout`` attribute was referenced (the pooler that owned a
        Dropout layer is commented out in ``__init__``).
        """
        if self.use_char:
            self.char_feature.cuda(device)

        if self.use_biword:
            self.biword_feature.cuda(device)

        if self.use_bert:
            self.bert.cuda(device)

    def forward(
        self,
        batch_word,
        batch_biword,
        batch_bert,
        batch_bert_mask,
    ):
        """Return the concatenated semantic feature for one batch.

        :param batch_word: char-id tensor for the LSTM char embedding
        :param batch_biword: bigram-id tensor for the CNN bigram embedding
        :param batch_bert: BERT input ids (with [CLS]/[SEP] wrapping)
        :param batch_bert_mask: BERT attention mask, same shape as batch_bert
        :return: tensor of shape (batch, seq_len, self.hidden_dim)
        :raises RuntimeError: if no feature source is enabled (the original
            code hit an UnboundLocalError in the biword-only configuration).
        """
        features = []

        if self.use_bert:
            # Single-segment input: all-zero token-type ids, created on the
            # same device as the mask (the original hard-coded .cuda(),
            # breaking CPU runs and non-default GPU placement).
            seg_id = torch.zeros_like(batch_bert_mask).long()
            bert_out = self.bert(batch_bert, batch_bert_mask, seg_id)
            # Strip [CLS]/[SEP] so positions align with the char sequence.
            features.append(bert_out[0][:, 1:-1, :])

        if self.use_char:
            features.append(self.char_feature(batch_word))

        if self.use_biword:
            features.append(self.biword_feature(batch_biword))

        if not features:
            raise RuntimeError(
                "SemanticView: at least one of use_bert / use_char / "
                "use_biword must be enabled"
            )

        return features[0] if len(features) == 1 else torch.cat(features, dim=-1)


# class CharLSTM(nn.Module):
