import imp
import torch
import torch.nn as nn
from loguru import logger
import numpy as np
from tqdm import tqdm
import sys

sys.path.append(".")
from utils.utils import *


def random_embedding(vocab_size, embedding_dim):
    """Build a randomly initialized embedding table.

    Note: the original approach initializes embeddings with a uniform
    distribution scaled by sqrt(3 / embedding_dim); this project uses
    ``torch.normal`` for the learned tables elsewhere.

    Args:
        vocab_size: number of rows (tokens) in the table.
        embedding_dim: dimensionality of each embedding vector.

    Returns:
        np.ndarray of shape (vocab_size, embedding_dim) with values drawn
        uniformly from [-scale, scale), scale = sqrt(3 / embedding_dim).
    """
    scale = np.sqrt(3.0 / embedding_dim)
    # Sample the whole matrix in one vectorized call; the previous
    # row-by-row Python loop added O(vocab_size) overhead for no benefit.
    return np.random.uniform(-scale, scale, (vocab_size, embedding_dim))


class SynoymViewFeature(nn.Module):
    """Embedding layer for the synonym (sense) view.

    For each token it concatenates:
      * the word embedding (x^w_{b,e} in the paper),
      * two relative-position embeddings w.r.t. the two entities
        (x^{p1}, x^{p2} in the paper),
      * optionally the bigram embedding (when ``data.use_bigram``),
    and applies dropout to the result.
    """

    def __init__(self, data):
        """Build the embedding tables from the project ``data`` object.

        Args:
            data: configuration/alphabet holder; must expose
                ``use_bigram``, ``HP_gpu``, ``word_emb_dim``,
                ``biword_emb_dim``, ``pos_size``, ``pos_emb_dim``,
                ``HP_dropout``, the alphabets and (optionally) the
                pretrained embedding matrices.
        """
        super(SynoymViewFeature, self).__init__()

        logger.info("Building Synoym View feature..")

        self.use_biword = data.use_bigram
        self.gpu = data.HP_gpu
        self.embedding_dim = data.word_emb_dim

        # Word embeddings, x^w_{b,e} in the paper
        self.Synword_embeddings = nn.Embedding(
            data.word_alphabet.size(), data.word_emb_dim
        )
        self.Synbiword_embeddings = nn.Embedding(
            data.biword_alphabet.size(), data.biword_emb_dim
        )

        # Position embeddings
        # x^{p1} in the paper
        self.pos1_embeddings = nn.Embedding(data.pos_size, data.pos_emb_dim)
        # x^{p2} in the paper
        self.pos2_embeddings = nn.Embedding(data.pos_size, data.pos_emb_dim)

        # Use pretrained word vectors when available; otherwise fall back
        # to a standard-normal initialization (the paper's choice).
        if data.pretrain_word_embedding is not None:
            self.Synword_embeddings.weight.data.copy_(
                torch.from_numpy(data.pretrain_word_embedding)
            )
        else:
            self.Synword_embeddings.weight.data.copy_(
                torch.normal(
                    mean=0,
                    std=1,
                    size=(data.word_alphabet.size(), data.word_emb_dim),
                )
            )

        if data.pretrain_biword_embedding is not None:
            self.Synbiword_embeddings.weight.data.copy_(
                torch.from_numpy(data.pretrain_biword_embedding)
            )
        else:
            self.Synbiword_embeddings.weight.data.copy_(
                torch.normal(
                    mean=0,
                    std=1,
                    size=(data.biword_alphabet.size(), data.biword_emb_dim),
                )
            )

        # Position embeddings are always randomly initialized.
        self.pos1_embeddings.weight.data.copy_(
            torch.normal(mean=0, std=1, size=(data.pos_size, data.pos_emb_dim))
        )
        self.pos2_embeddings.weight.data.copy_(
            torch.normal(mean=0, std=1, size=(data.pos_size, data.pos_emb_dim))
        )
        self.drop = nn.Dropout(data.HP_dropout)

        # BUGFIX: the old code called to_cuda() unconditionally, which
        # crashed on CPU-only machines even when HP_gpu was False.
        if self.gpu:
            self.to_cuda()

    def to_cuda(self, device="cuda:0"):
        """Move every submodule to the given CUDA device in place."""
        self.drop = self.drop.cuda(device)
        self.Synword_embeddings = self.Synword_embeddings.cuda(device)
        self.Synbiword_embeddings = self.Synbiword_embeddings.cuda(device)
        self.pos1_embeddings = self.pos1_embeddings.cuda(device)
        self.pos2_embeddings = self.pos2_embeddings.cuda(device)

    def forward(
        self,
        senseword_inputs,
        biword_inputs,
        pos1_inputs,
        pos2_inputs,
    ):
        """Embed a batch of id tensors.

        Args:
            senseword_inputs: (batch, seq_len) word ids.
            biword_inputs: (batch, seq_len) bigram ids; only read when
                ``self.use_biword`` is True.
            pos1_inputs, pos2_inputs: (batch, seq_len) relative-position
                ids w.r.t. entity 1 / entity 2.

        Returns:
            (batch, seq_len, word_dim + 2*pos_dim [+ biword_dim]) tensor
            after dropout.
        """
        word_embs = self.Synword_embeddings(senseword_inputs)
        pos1_embs = self.pos1_embeddings(pos1_inputs)
        pos2_embs = self.pos2_embeddings(pos2_inputs)
        word_embs = torch.cat([word_embs, pos1_embs, pos2_embs], 2)

        if self.use_biword:
            biword_embs = self.Synbiword_embeddings(biword_inputs)
            word_embs = torch.cat([word_embs, biword_embs], 2)

        # Character-level features are intentionally disabled here
        # (see the commented-out char CNN/LSTM path in version control).
        word_embs = self.drop(word_embs)

        return word_embs


# Bag-construction modes for read_instance_with_syn_mode:
#   "ins"        -> 0: one instance per bag
#   "entpair"    -> 1: group instances by (head, tail) entity pair
#   "relfact"    -> 2: group instances by (head, tail, relation) fact
#   "multilabel" -> 3: merge duplicate (head, tail, sentence) rows into one
#                      multi-label instance, then one instance per bag
LOAD_MODE = {"ins": 0, "entpair": 1, "relfact": 2, "multilabel": 3}
# Placeholder appended to the final word when building its trailing bigram.
NULLKEY = "-null-"


def read_instance_with_syn_mode(
    input_file,
    gaz,
    word_alphabet,
    biword_alphabet,
    char_alphabet,
    # -----
    gaz_alphabet,
    label_alphabet,
    number_normalized,
    max_sent_length,
    word_sense_map,
    mode_type,
    words_larger_than_one=None,
    char_padding_size=-1,
    char_padding_symbol="</pad>",
):
    """Read a relation-extraction dataset and convert it to bag-structured ids.

    Each input line is tab-separated: ``[sentid] ent1 ent2 label sentence``
    (4 or 5 fields; anything else is skipped, as are lines whose entities
    do not occur in the sentence). Instances are grouped into "bags"
    according to ``mode_type`` (see ``LOAD_MODE``). For every kept
    sentence the function produces word/bigram/char ids, relative entity
    positions, and lexicon (sense-level) matches.

    Args:
        input_file: path to the tab-separated dataset.
        gaz: lexicon trie exposing ``enumerateMatchList`` and ``space``.
        word_alphabet / biword_alphabet / char_alphabet: token -> id maps.
        gaz_alphabet: lexicon-entry -> id map (UNKNOWN for unseen entries).
        label_alphabet: relation label -> id map.
        number_normalized: if True, digits are normalized via ``normalize_word``.
        max_sent_length: sentences longer than this are dropped
            (a negative value disables the limit).
        word_sense_map: maps a polysemous word to its list of sense entries.
        mode_type: one of the keys of ``LOAD_MODE``.
        words_larger_than_one: optional set forwarded to ``str2list``
            (defaults to an empty set).
        char_padding_size: pad char lists to this length when > 0.
        char_padding_symbol: padding token for short char lists.

    Returns:
        (instence_texts, instence_Ids, freq): bag-structured raw texts,
        bag-structured id lists, and a relation-frequency dict.
    """
    # Avoid the shared-mutable-default-argument pitfall.
    if words_larger_than_one is None:
        words_larger_than_one = set()

    instence_texts = []
    instence_Ids = []

    # Clean data: keep well-formed lines whose entities appear in the
    # sentence, and count relation-label frequencies.
    def clean_data(fr):
        ori_data = []
        freq = dict()

        for line in fr:
            line = line.strip().split("\t")
            if len(line) == 5:
                sentid, ent1, ent2, label, sent = line
            elif len(line) == 4:
                ent1, ent2, label, sent = line
            else:
                continue

            if ent1 not in sent:
                logger.info(ent1 + " not found in " + sent)
                continue
            if ent2 not in sent:
                logger.info(ent2 + " not found in " + sent)
                continue

            if label not in freq:
                freq[label] = 1
            else:
                freq[label] += 1

            ori_data.append(
                {"head": ent1, "tail": ent2, "relation": label, "sentence": sent}
            )

        return ori_data, freq

    # BUGFIX: the old code opened the file and never closed it; the
    # context manager releases the handle deterministically.
    with open(input_file, "r", encoding="utf-8") as fr:
        ori_data, freq = clean_data(fr)

    mode = LOAD_MODE[mode_type]
    # Multi-label mode: merge rows sharing (head, tail, sentence) into a
    # single instance carrying all of their relations under "mult-rel".
    if mode == 3:
        print("Merging data with same (head,tail,sent)...")
        tmp_data = ori_data
        tmp_data.sort(key=lambda a: a["head"] + "#" + a["tail"] + "#" + a["sentence"])
        ori_data = []
        prekey = ""
        curins = None
        for ins in tmp_data:
            curkey = ins["head"] + "#" + ins["tail"] + "#" + ins["sentence"]
            if curkey != prekey:
                if curins is None:
                    pass
                else:
                    ori_data.append(curins)
                curins = ins
                curins["mult-rel"] = [ins["relation"]]
                prekey = curkey
            else:
                curins["mult-rel"].append(ins["relation"])
        if not curins is None:
            ori_data.append(curins)
        print("Finish merging..")

    # Sort data by entities and relations
    ori_data.sort(key=lambda a: a["head"] + "#" + a["tail"] + "#" + a["relation"])

    bags = []
    prekey = ""
    curbag = []
    maxlen = 0
    # organize data into bags: each bag contains one or more instances according to the mode
    for didx, data in enumerate(ori_data):
        if mode == 1:
            curkey = data["head"] + "#" + data["tail"]
        elif mode == 2:
            curkey = data["head"] + "#" + data["tail"] + "#" + data["relation"]
        else:
            # modes 0 and 3: every instance is its own bag
            curkey = str(didx)
        if curkey != prekey:
            if len(curbag) > 0:
                bags.append(curbag)
            curbag = [data]
            prekey = curkey
        else:
            curbag.append(data)
    if len(curbag) > 0:
        bags.append(curbag)

    ent_cnt = 0
    ent_multi_cnt = 0
    UNK_id = gaz_alphabet.get_index(gaz_alphabet.UNKNOWN)
    for bag in tqdm(bags, desc="Syn {}...".format(input_file)):

        bag_texts = []
        bag_Ids = []

        for data in bag:
            words = []
            biwords = []
            chars = []

            word_Ids = []
            biword_Ids = []
            char_Ids = []

            pos1s = []
            pos2s = []

            ent1, ent2 = data["head"], data["tail"]
            sent = data["sentence"]
            if "mult-rel" in data:
                labels = list(set(data["mult-rel"]))
                labels_Ids = [label_alphabet.get_index(label) for label in labels]
            else:
                labels = [data["relation"]]
                labels_Ids = [label_alphabet.get_index(label) for label in labels]

            ent1 = str2list(ent1, words_larger_than_one)
            ent2 = str2list(ent2, words_larger_than_one)
            sent = str2list(sent, words_larger_than_one)

            # Locate both entity spans; the cleaning step guarantees they
            # occur as substrings, so the list-level find must succeed.
            lf1 = list_find(sent, ent1)
            assert lf1 != -1
            rg1 = lf1 + len(ent1) - 1
            lf2 = list_find(sent, ent2)
            assert lf2 != -1
            rg2 = lf2 + len(ent2) - 1

            ent1 = "".join(ent1)
            ent2 = "".join(ent2)

            if sent[-1] not in ["。"] and len(sent) < max_sent_length:
                sent.append("。")  # preprocessing: append a sentence-final period

            # internal information:
            #   char + biword + position
            #   radical information
            for widx, word in enumerate(sent):
                # character-level processing for the current token
                if number_normalized:
                    word = normalize_word(word)
                if widx < len(sent) - 1 and len(sent) > 2:
                    biword = word + sent[widx + 1]
                else:
                    biword = word + NULLKEY

                # words and bi-gram (not used)
                biwords.append(biword)
                words.append(word)
                word_Ids.append(word_alphabet.get_index(word))
                biword_Ids.append(biword_alphabet.get_index(biword))

                # character features (not used)
                char_list = []
                char_Id = []
                for char in word:
                    char_list.append(char)
                if char_padding_size > 0:
                    char_number = len(char_list)
                    if char_number < char_padding_size:
                        char_list = char_list + [char_padding_symbol] * (
                            char_padding_size - char_number
                        )
                    assert len(char_list) == char_padding_size

                for char in char_list:
                    char_Id.append(char_alphabet.get_index(char))
                chars.append(char_list)
                char_Ids.append(char_Id)

                # relative position w.r.t. the two entity spans
                pos1, pos2 = get_pos_embeded(widx, lf1, rg1, lf2, rg2, max_sent_length)
                pos1s.append(pos1)
                pos2s.append(pos2)

            # external information:
            #       lexicon + sense-level
            # deal with lexicon (sense-level)
            if ((max_sent_length < 0) or (len(words) <= max_sent_length)) and (
                len(words) > 0
            ):
                gazs = []
                gaz_Ids = []
                w_length = len(words)
                maxlen = max(maxlen, w_length)

                for widx in range(w_length):
                    # get all potential words that start from index widx
                    matched_list = gaz.enumerateMatchList(words[widx:])

                    matched_Id = []
                    matched_length = []
                    for entity in matched_list:
                        if gaz.space:
                            entity = entity.split(gaz.space)
                        entlen = len(entity)
                        entity = "".join(entity)
                        ent_ind = gaz_alphabet.get_index(entity)
                        ent_cnt += 1
                        if ent_ind == UNK_id:
                            # current word is polysemous word with more than one sense
                            if word_sense_map and entity in word_sense_map:
                                ent_multi_cnt += 1
                                # get all senses of current word
                                for cur_ent in word_sense_map[entity]:
                                    cur_ind = gaz_alphabet.get_index(cur_ent)
                                    matched_Id.append(cur_ind)
                                    matched_length.append(entlen)
                        else:
                            matched_Id.append(ent_ind)
                            matched_length.append(entlen)
                        gazs.append(entity)

                    if matched_Id:
                        # lexicon or sense
                        gaz_Ids.append([matched_Id, matched_length])
                    else:
                        gaz_Ids.append([])

                bag_texts.append([words, biwords, chars, gazs, ent1, ent2, labels])
                bag_Ids.append(
                    [
                        word_Ids,
                        biword_Ids,
                        char_Ids,
                        gaz_Ids,
                        labels_Ids,
                        pos1s,
                        pos2s,
                    ]
                )
            else:
                # sentence too long (or empty after conversion) — skip it
                continue

        if len(bag_texts) > 0:
            instence_texts.append(bag_texts)
            instence_Ids.append(bag_Ids)

    # BUGFIX: guard against division by zero when no lexicon entries matched.
    ratio = 100.0 * ent_multi_cnt / ent_cnt if ent_cnt else 0.0
    logger.info(
        "{} Total entities: {}  Entities with multi-sense: {}  Ratio: {:.3f}%".format(
            input_file, ent_cnt, ent_multi_cnt, ratio
        )
    )

    return instence_texts, instence_Ids, freq


if __name__ == "__main__":

    # Debug/smoke-test harness: builds the alphabets and the synonym-view
    # feature module, then reads one dataset file with the "ins" bag mode.
    # Contains interactive ipdb breakpoints — not meant for production runs.
    from utils.data import Data
    from utils.paths import *

    # from utils.functions import read_instance_with_gaz

    def data_initialization(
        data, gaz_file, sense_file, sense_map, train_file, dev_file, test_file
    ):
        # Populate word/char/biword alphabets from all splits, then the
        # word-sense resources, and freeze the alphabets.
        # NOTE(review): `gaz_file` is currently unused — the gaz-building
        # calls below are commented out; confirm whether that is intended.
        data.build_alphabet(train_file)  # word + char + biword
        data.build_alphabet(dev_file)
        data.build_alphabet(test_file)
        # -------------
        # data.build_gaz_file(gaz_file)
        # data.build_gaz_alphabet(train_file, count=True)
        # data.build_gaz_alphabet(dev_file, count=True)
        # data.build_gaz_alphabet(test_file, count=True)
        # --------------
        # data.build_words_larger_one_set(char_emb)
        # Build bidrectional maps for [words & word senses].
        data.build_word_sense_map(sense_map)
        data.build_syn_file(sense_file)
        data.build_syn_alphabet(train_file)
        data.build_syn_alphabet(dev_file)
        data.build_syn_alphabet(test_file)

        data.fix_alphabet()
        return data

    data = Data()

    # All three splits intentionally point at the same small test file.
    train_file = "./lexicon/sense/test.txt"
    dev_file = "./lexicon/sense/test.txt"
    test_file = "./lexicon/sense/test.txt"
    labels = "./lexicon/sense/relation2id.txt"

    # gaz_file_ctb / sense_file / sense_map / char_emb presumably come
    # from `utils.paths` (star import) — TODO confirm.
    data_initialization(
        data, gaz_file_ctb, sense_file, sense_map, train_file, dev_file, test_file
    )
    data.build_label_alphabet(labels)
    data.build_word_pretrain_emb(char_emb)
    data.build_gaz_pretrain_emb(gaz_file_ctb)

    model = SynoymViewFeature(data)
    # model()
    # data.build_biword_pretrain_emb(bichar_emb)

    instance, ids, freq = read_instance_with_syn_mode(
        train_file,
        data.syngaz,
        data.word_alphabet,
        data.biword_alphabet,
        data.char_alphabet,
        # ----
        data.syngaz_alphabet,
        data.label_alphabet,
        data.number_normalized,
        data.MAX_SENTENCE_LENGTH,
        data.word_sense_map,
        mode_type="ins",
    )

    # Interactive inspection point #1: examine the loaded instances.
    import ipdb

    ipdb.set_trace()
    batch_instance = instance[0:32]
    from main import batchify_with_label

    # () =

    # Interactive inspection point #2 (duplicate ipdb import is harmless).
    import ipdb

    ipdb.set_trace()

    data.show_data_summary()