from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm

# from fastNLP import logger
# logger.set_stdout("none", level="WARN")
from loguru import logger

from fastNLP import Vocabulary
from fastNLP.embeddings.torch import CNNCharEmbedding

import sys

sys.path.append("./")
from utils.data import Data
from utils.alphabet import Alphabet
from utils.gazetteer import Gazetteer

# Path to the chaizi (character-decomposition) lexicon. Each line is
# "<char>\t<decomposition 1>\t<decomposition 2>..." (tab-separated).
radical_path = "./lexicon/chaizi/chaizi-ft.txt"
# char -> list of tab-separated decomposition strings for that char.
char_info = dict()
with open(radical_path, "r", encoding="utf-8") as f:
    lines = f.readlines()
for line in tqdm(lines, desc="Loading {}".format(radical_path)):
    # Split off the head character; the remainder holds the decompositions.
    char, info = line.split("\t", 1)
    char_info[char] = info.replace("\n", "").split("\t")


def char2radical(c):
    """Return the radical decomposition of character *c*.

    Looks *c* up in the module-level ``char_info`` table (loaded from the
    chaizi lexicon) and splits the first decomposition string into single
    radical characters. Unknown characters map to the placeholder ``["○"]``
    so callers always receive a non-empty list.

    :param c: a single character (str)
    :return: list of radical characters (str)
    """
    # Single dict lookup via .get() instead of the original
    # `c in char_info.keys()` membership test followed by a second lookup.
    c_info = char_info.get(c)
    if c_info is not None:
        # c_info[0] is the first decomposition; iterate it into chars.
        return list(c_info[0])
    return ["○"]


def construct_radical_vocab_from_vocab(
    char_vocab: Vocabulary, include_word_start_end=True
):
    """Build a radical-level Vocabulary covering every char in *char_vocab*.

    :param char_vocab: character vocabulary; iterating yields (char, index).
    :param include_word_start_end: also register the "<bow>"/"<eow>" markers.
    :return: a fastNLP ``Vocabulary`` over radicals.
    """
    radical_vocab = Vocabulary()
    for ch, _idx in char_vocab:
        radical_vocab.add_word_lst(char2radical(ch))
    if include_word_start_end:
        radical_vocab.add_word_lst(["<bow>", "<eow>"])
    return radical_vocab


def alphabet2vocab(phabet: Alphabet):
    """Convert a project ``Alphabet`` into a fastNLP ``Vocabulary``.

    :param phabet: Alphabet whose ``iteritems()`` yields (word, index) pairs.
    :return: Vocabulary containing every word from the alphabet.
    """
    result = Vocabulary()
    for token, _index in phabet.iteritems():
        result.add_word(token)
    return result


from model.config_layer import config_radical


class RadicalViewFeature(nn.Module):
    """Radical-view character encoder.

    Decomposes every character in ``data.word_alphabet`` into radicals
    (via the module-level chaizi lexicon), embeds the radicals, applies
    parallel 1-D convolutions of different kernel sizes, pools over the
    radical axis, and projects the result to
    ``config_radical["pooler_dim"]``.
    """

    def __init__(
        self,
        data: Data,
        filter_nums: List[int] = (40, 30, 20),
        kernel_sizes: List[int] = (5, 3, 1),
        pool_method: str = "max",
        include_word_start_end: bool = True,
    ):
        """
        :param data: project ``Data`` object; only ``word_alphabet`` is read.
        :param filter_nums: output channel count of each conv branch.
        :param kernel_sizes: kernel size of each branch; must be odd so
            ``padding = k // 2`` preserves the sequence length.
        :param pool_method: "max" or "avg" pooling over the radical axis.
        :param include_word_start_end: if True, wrap each radical sequence
            with "<bow>"/"<eow>" marker tokens.
        """

        super(RadicalViewFeature, self).__init__()

        self.radical_feature_dim = config_radical["radical_feature_dim"]
        self.pooler_dim = config_radical["pooler_dim"]

        # Odd kernels guarantee symmetric padding, so conv output length
        # equals input length and the branches can be concatenated.
        for kernel in kernel_sizes:
            assert kernel % 2 == 1, "Only odd kernel is allowed."
        assert pool_method in ("max", "avg")
        self.pool_method = pool_method
        self.include_word_start_end = include_word_start_end

        # Build char vocab
        self.chars_vocab = alphabet2vocab(data.word_alphabet)
        # Build radical vocab
        self.radical_vocab = construct_radical_vocab_from_vocab(self.chars_vocab)

        # ============================
        # Index the vocabulary: find the longest radical sequence over all
        # chars (iterating the vocab yields (word, index) pairs, hence x[0]).
        max_radical_nums = max(map(lambda x: len(char2radical(x[0])), self.chars_vocab))
        if include_word_start_end:
            max_radical_nums += 2
        self.char_pad_index = self.radical_vocab.padding_idx

        # Lookup table mapping char index -> fixed-width row of radical
        # indices, pre-filled with the radical pad index.
        # NOTE(review): hard-coded .cuda() assumes a GPU is available —
        # confirm; this breaks on CPU-only machines.
        self.chars_to_radicals_embedding = (
            torch.full(
                (len(self.chars_vocab), max_radical_nums),
                fill_value=self.char_pad_index,
            )
            .long()
            .cuda()
        )
        # Number of radicals (incl. markers) per char, same indexing.
        self.word_lengths = torch.zeros(len(self.chars_vocab)).long().cuda()

        for word, index in self.chars_vocab:
            word = char2radical(word)
            if self.include_word_start_end:
                word = ["<bow>"] + word + ["<eow>"]
            self.chars_to_radicals_embedding[index, : len(word)] = torch.LongTensor(
                [self.radical_vocab.to_index(c) for c in word]
            )
            self.word_lengths[index] = len(word)

        # # ==============================
        self.radical_embedding = nn.Embedding(
            len(self.radical_vocab), self.radical_feature_dim
        )
        # Parallel same-length convolutions over the radical axis; their
        # outputs are concatenated channel-wise in forward().
        self.radical_convs = nn.ModuleList(
            [
                nn.Conv1d(
                    self.radical_feature_dim,
                    filter_nums[i],
                    kernel_size=kernel_sizes[i],
                    bias=True,
                    padding=kernel_sizes[i] // 2,
                )
                for i in range(len(kernel_sizes))
            ]
        )

        # Non-linearity + projection of the concatenated conv channels.
        self.pooler = nn.Sequential(
            nn.Tanh(),
            nn.Linear(sum(filter_nums), self.pooler_dim),
        )

    def forward(self, words):
        """Build radical-view representations for a batch of word indices.

        :param words: LongTensor ``[batch_size, max_len]`` of char indices
            into ``self.chars_vocab``.
        :return: FloatTensor ``[batch_size, max_len, pooler_dim]``.
        """

        batch_size, max_len = words.size()

        # batch_size x max_len x max_word_len: radical indices per char,
        # gathered from the precomputed lookup table.
        chars = self.chars_to_radicals_embedding[words]
        # batch_size x max_len: true radical-sequence length per char
        word_lengths = self.word_lengths[words]

        # Trim the radical axis to the longest sequence in this batch.
        max_word_len = word_lengths.max()
        chars = chars[:, :, :max_word_len]

        # batch_size x max_len x max_word_len boolean mask:
        # True where the radical slot is padding (to be masked out below).
        chars_masks = chars.eq(self.char_pad_index)

        chars = self.radical_embedding(chars)
        # Fold (batch, seq) together so Conv1d sees (B', channels, length).
        reshaped_chars = chars.reshape(batch_size * max_len, max_word_len, -1)
        reshaped_chars = reshaped_chars.transpose(1, 2)  # B' x E x M
        conv_chars = [
            conv(reshaped_chars)
            .transpose(1, 2)
            .reshape(batch_size, max_len, max_word_len, -1)
            for conv in self.radical_convs
        ]
        # B x max_len x max_word_len x sum(filters)
        conv_chars = torch.cat(conv_chars, dim=-1).contiguous()

        if self.pool_method == "max":
            # -inf at padded slots so max-pooling ignores them.
            # batch_size x max_len x sum(filters)
            conv_chars = conv_chars.masked_fill(
                chars_masks.unsqueeze(-1), float("-inf")
            )
            chars, _ = torch.max(conv_chars, dim=-2)
        else:
            # Zero the padded slots, then average over the real slot count.
            conv_chars = conv_chars.masked_fill(chars_masks.unsqueeze(-1), 0)
            chars = (
                torch.sum(conv_chars, dim=-2)
                / chars_masks.eq(False).sum(dim=-1, keepdim=True).float()
            )

        return self.pooler(chars)  # [b, l, dim]
