# -*- coding: utf-8 -*-

import sys
import numpy as np
from utils.alphabet import Alphabet
from utils.functions import *
from utils.utils import *
from utils.gazetteer import Gazetteer
from loguru import logger
from tqdm import tqdm

START = "</s>"  # token used to mark the start of a sentence
UNKNOWN = "</unk>"  # token used to represent unknown (out-of-vocabulary) words
PADDING = "</pad>" # token used for padding
NULLKEY = "-null-" # token representing an empty or invalid entry

# NOTE(review): this helper appears duplicated elsewhere in the project (utils)
def build_pretrain_embedding(embedding_path, word_alphabet, embedd_dim=100, norm=True):
    """Build an embedding matrix covering every entry of *word_alphabet*.

    Words found in the pretrained file (exactly, or after lower-casing) get
    their pretrained vector, optionally L2-normalized; out-of-vocabulary
    words get a random vector drawn uniformly from [-scale, scale].

    :param embedding_path: path to a pretrained embedding text file, or None
    :param word_alphabet: alphabet object mapping words to row indices
    :param embedd_dim: embedding dimension; overridden by the file when loaded
    :param norm: whether to L2-normalize each pretrained vector
    :return: (embedding matrix of shape [alphabet size, dim], dim)
    """
    logger.info("Build from {}...".format(embedding_path))

    embedd_dict = dict()  # word -> (1, dim) vector read from the file
    if embedding_path is not None:  # fixed: was "!= None"
        embedd_dict, embedd_dim = load_pretrain_emb(embedding_path)

    scale = np.sqrt(3.0 / embedd_dim)
    pretrain_emb = np.empty([word_alphabet.size(), embedd_dim])
    perfect_match = 0
    case_match = 0
    not_match = 0
    # row 0 is the alphabet's reserved slot; give it a random vector
    pretrain_emb[0, :] = np.random.uniform(-scale, scale, [1, embedd_dim])
    for word, index in tqdm(
        word_alphabet.instance2index.items(), desc="Load {}..".format(embedding_path)
    ):
        if word in embedd_dict:
            if norm:
                pretrain_emb[index, :] = norm2one(embedd_dict[word])
            else:
                pretrain_emb[index, :] = embedd_dict[word]
            perfect_match += 1
        elif word.lower() in embedd_dict:
            if norm:
                pretrain_emb[index, :] = norm2one(embedd_dict[word.lower()])
            else:
                pretrain_emb[index, :] = embedd_dict[word.lower()]
            case_match += 1
        else:
            # OOV word: random initialization
            pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedd_dim])
            not_match += 1
    pretrained_size = len(embedd_dict)
    # fixed typo in the report string: "prefect" -> "perfect"
    print(
        "Embedding:\n     pretrain word:%s, perfect match:%s, case_match:%s, oov:%s, oov%%:%s"
        % (
            pretrained_size,
            perfect_match,
            case_match,
            not_match,
            (not_match + 0.0) / word_alphabet.size(),
        )
    )
    return pretrain_emb, embedd_dim

# NOTE(review): this helper appears duplicated elsewhere in the project (utils)
def norm2one(vec):
    """Return *vec* rescaled to unit Euclidean (L2) length."""
    length = np.sqrt(np.square(vec).sum())
    return vec / length

def load_pretrain_emb(embedding_path):
    """Read a whitespace-separated embedding file into a dict.

    Lines with exactly two tokens are treated as a "<count> <dim>" header
    and echoed; the dimension is inferred from the first data line and
    asserted on all following lines.

    :param embedding_path: path to the embedding text file
    :return: ({word: (1, dim) numpy array}, dim) — dim is -1 for empty files
    """
    dim = -1
    vectors = dict()
    with open(embedding_path, "r", encoding="utf-8") as fin:
        for raw in tqdm(fin):
            raw = raw.strip()
            # skip blank lines and 2-character lines (original behavior)
            if len(raw) == 0 or len(raw) == 2:
                continue
            parts = raw.split()
            if len(parts) == 2:
                # header line: report totals and move on
                print("Total {} word, dim  {}\n".format(parts[0], parts[1]))
                continue
            if dim < 0:
                dim = len(parts) - 1
            else:
                assert dim + 1 == len(parts)
            row = np.empty([1, dim])
            row[:] = parts[1:]
            vectors[parts[0]] = row
    return vectors, dim


class Data:
    """Holds alphabets, gazetteers, pretrained embeddings and dataset instances."""

    def __init__(self):
        self.multiview_feature_dim = 768  # dimension of multi-view (e.g. BERT) features
        self.hidden_dim = 512  # hidden layer dimension
        self.MAX_SENTENCE_LENGTH = 250  # maximum sentence length
        self.MAX_WORD_LENGTH = -1  # maximum word length; -1 means no limit

        # relative-position vocabulary size, derived from the max sentence length
        self.pos_size = self.MAX_SENTENCE_LENGTH * 2 + 3
        # normalization switches
        self.number_normalized = True  # normalize digits inside words
        self.norm_word_emb = True  # L2-normalize word embeddings
        self.norm_biword_emb = True  # L2-normalize bigram embeddings
        self.norm_gaz_emb = False  # L2-normalize gazetteer embeddings
        # alphabets (token <-> index maps)
        self.word_alphabet = Alphabet("word")
        self.biword_alphabet = Alphabet("biword")
        self.char_alphabet = Alphabet("character")

        self.label_alphabet = Alphabet("label", True)  # True: this is a label alphabet
        self.train_freq = {}  # label -> frequency in the training set
        # gazetteer (lexicon) settings
        self.gaz_lower = False  # whether gazetteer entries are lower-cased
        self.gaz = Gazetteer(self.gaz_lower)
        self.gaz_alphabet = Alphabet("gaz")
        self.gaz_count = {}  # gaz index -> occurrence count
        self.biword_count = {}
        # synonym gazetteer
        self.syngaz_lower = False  # whether synonym entries are lower-cased
        self.syngaz = Gazetteer(self.syngaz_lower)
        self.syngaz_alphabet = Alphabet("Syngaz")
        self.words_longer_than_one = set()  # multi-char atomic tokens, e.g. "<N>"
        self.word_sense_map = dict()  # word -> set of word senses
        self.sense_word_map = dict()  # word sense -> word
        # pretrained embedding / model settings
        self.use_bert = False  # whether a BERT model is used

        # ================
        # train / dev / test datasets
        self.train_texts = []
        self.dev_texts = []
        self.test_texts = []
        self.train_Ids = []
        self.dev_Ids = []
        self.test_Ids = []
        # embedding dimensions
        self.word_emb_dim = 50
        self.biword_emb_dim = 50
        self.char_emb_dim = 50
        self.gaz_emb_dim = 50

        # ==============
        # pretrained embedding matrices (filled in by build_*_pretrain_emb)
        self.pretrain_word_embedding = None
        self.pretrain_biword_embedding = None
        self.pretrain_gaz_embedding = None

        # ===============
        # alphabet sizes (filled in by the build_* methods)
        self.word_alphabet_size = 0
        self.biword_alphabet_size = 0
        self.char_alphabet_size = 0
        self.label_alphabet_size = 0

    def show_data_summary(self):
        """Print a summary of the loaded data (currently disabled)."""
        pass

    def build_label_alphabet(self, input_file):
        """Build the label alphabet from a file of "<label> <id>" lines.

        Fields may be separated by a single space or by a tab.
        """
        # fixed: original passed builtin format(input_file) as a logger argument
        logger.info("Build {}..".format(input_file))
        with open(input_file, "r", encoding="utf-8") as fr:
            in_lines = fr.readlines()
        for line in in_lines:
            if len(line) > 2:
                try:
                    rel, _rel_id = line.strip().split(" ")
                except ValueError:  # fall back to tab-separated lines
                    rel, _rel_id = line.strip().split("\t")

                self.label_alphabet.add(rel)  # register the label

        self.label_alphabet_size = self.label_alphabet.size()
        self.num_classes = self.label_alphabet_size
        logger.info("Class: {}".format(self.label_alphabet.get_content()))

    # build weight of each relation label for the loss function based on frequency
    def build_weights(self, mode="default"):
        """Build per-label loss weights from training-set label frequencies.

        A trick for handling class imbalance.

        :param mode: "reciprocal" (1/freq), "smooth" (1/freq**0.06), or
            anything else for uniform weights.
        """
        weights = np.ones(self.num_classes)
        for label, cnt in self.train_freq.items():
            weights[self.label_alphabet.get_index(label)] += cnt
        if mode == "reciprocal":
            self.weights = 1.0 / weights
        elif mode == "smooth":
            self.weights = 1.0 / (weights**0.06)
        else:
            self.weights = np.ones(self.num_classes)

    # record word -> word sense & word sense -> word
    def build_word_sense_map(self, input_file):
        """Load word<->sense maps; each line is "<word> <sense1> <sense2> ..."."""
        logger.info("Build {}...".format(input_file))
        if input_file:
            # fixed: original leaked the file handle
            with open(input_file, "r", encoding="utf-8") as fr:
                for line in fr:
                    if len(line) < 5:  # skip blank / malformed lines
                        continue
                    parts = line.strip().split(" ")
                    self.word_sense_map[parts[0]] = set(parts[1:])
                    for sense in parts[1:]:
                        self.sense_word_map[sense] = parts[0]

    # characters whose length is larger than one: <N> (refers to number)
    def build_words_larger_one_set(self, char_emb=""):
        """Collect multi-character tokens (e.g. "<N>") from a char embedding file."""
        logger.info("Build {}...".format(char_emb))

        if char_emb:
            with open(char_emb, "r", encoding="utf-8") as file:
                for line in file:
                    line = line.strip()
                    if len(line) <= 1:
                        continue
                    tokens = line.split()
                    if len(tokens) <= 3:  # not an embedding row
                        continue
                    if len(tokens[0]) > 1:
                        self.words_longer_than_one.add(tokens[0])

    def build_alphabet(self, input_file):
        """Populate word / biword / char alphabets and label frequencies.

        Each useful line is tab-separated with the relation label in
        column 2 and the sentence in the last column.

        TODO: may need rewriting for differently formatted inputs.
        """
        logger.info("Build {}...".format(input_file))

        with open(input_file, "r", encoding="utf-8") as fr:
            in_lines = fr.readlines()
        for line in tqdm(in_lines):
            if len(line) > 4:
                fields = line.strip().split("\t")
                sent = str2list(fields[-1], self.words_longer_than_one)
                rel = fields[2]

                # count label frequency (simplified from a get(rel, -1) dance)
                self.train_freq[rel] = self.train_freq.get(rel, 0) + 1

                for widx, word in enumerate(sent):
                    if self.number_normalized:
                        word = normalize_word(word)
                    self.word_alphabet.add(word)

                    # bigram = current word + next word (NULLKEY at the end)
                    if widx < len(sent) - 1 and len(sent) > 2:
                        nxtword = sent[widx + 1]
                        if self.number_normalized:
                            nxtword = normalize_word(nxtword)
                    else:
                        nxtword = NULLKEY

                    self.biword_alphabet.add(word + nxtword)
                    for char in word:
                        self.char_alphabet.add(char)

        self.word_alphabet_size = self.word_alphabet.size()
        self.biword_alphabet_size = self.biword_alphabet.size()
        self.char_alphabet_size = self.char_alphabet.size()

        logger.info("Class weight: {}".format(self.train_freq))

    def build_syn_file(self, sense_file):
        """Insert each entry of *sense_file* into the synonym gazetteer.

        When a sense->word map exists, senses are first collapsed to their
        word and duplicates are skipped.
        """
        logger.info("Build sense from {}..".format(sense_file))

        if sense_file:
            visit = set()
            with open(sense_file, "r", encoding="utf-8") as fr:
                fins = fr.readlines()
            for fin in tqdm(fins, desc="Build {}".format(sense_file)):
                parts = fin.strip().split()
                if not parts:  # guard: original crashed (IndexError) on blank lines
                    continue
                fin = parts[0]
                if fin:
                    if self.sense_word_map:
                        if fin in self.sense_word_map:
                            fin = self.sense_word_map[fin]
                        if fin in visit:
                            continue
                        visit.add(fin)

                    fin = str2list(fin, self.words_longer_than_one)
                    self.syngaz.insert(fin, "one_source")
            logger.info(
                "Load gaz file: {}, total size {}".format(
                    sense_file, self.syngaz.size()
                )
            )
        else:
            logger.info("Gaz file is None, load nothing")

    def build_radical_file(self, radical_path):
        """Load a tab-separated character -> radical-info table."""
        logger.info("Build radical from {}..".format(radical_path))

        self.radical_info = dict()
        # fixed: open with explicit utf-8 like every other reader in this file
        with open(radical_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
            for line in tqdm(lines, desc="Loading {}".format(radical_path)):
                char, info = line.split("\t", 1)
                self.radical_info[char] = info.replace("\n", "").split("\t")

    def build_gaz_file(self, gaz_file):
        """Insert the first token of every line of *gaz_file* into the gazetteer."""
        logger.info("Build Gaz from {}..".format(gaz_file))
        if gaz_file:
            with open(gaz_file, "r", encoding="utf-8") as fr:
                fins = fr.readlines()
            for fin in tqdm(fins, desc="Load {}..".format(gaz_file)):
                parts = fin.strip().split()
                if not parts:  # guard: original crashed (IndexError) on blank lines
                    continue
                fin = parts[0]
                if fin:
                    self.gaz.insert(fin, "one_source")
            print("Load gaz file: ", gaz_file, " total size:", self.gaz.size())
        else:
            print("Gaz file is None, load nothing")

    def build_radical_alphabet(self, input_file):
        """TODO: populate the radical alphabet from *input_file*."""
        logger.info("Build radical {}..".format(input_file))
        self.radical_alphabet = Alphabet("radical")

    def build_syn_alphabet(self, input_file):
        """Build the synonym-sense alphabet by matching corpus spans against syngaz.

        TODO: adapt for other corpus formats.
        """
        logger.info("Build sys {}..".format(input_file))

        with open(input_file, "r", encoding="utf-8") as fr:
            in_lines = fr.readlines()
        word_list = []
        for line in tqdm(in_lines, desc="Build {}..".format(input_file)):
            if len(line) > 4:
                sent = line.strip().split("\t")[-1]  # [head tail sent]
                sent = str2list(sent, self.words_longer_than_one)
                for word in sent:
                    if self.number_normalized:
                        word = normalize_word(word)
                    word_list.append(word)
                w_length = len(word_list)

                for idx in range(w_length):
                    matched_entity = self.syngaz.enumerateMatchList(word_list[idx:])
                    for entity in matched_entity:
                        # NOTE(review): this reads self.gaz.space while matching
                        # against syngaz — verify it should not be self.syngaz.space
                        if self.gaz.space:
                            entity = "".join(entity.split(self.gaz.space))
                        if self.word_sense_map and entity in self.word_sense_map:
                            for sense in self.word_sense_map[entity]:
                                self.syngaz_alphabet.add(sense)
                        else:
                            self.syngaz_alphabet.add(entity)
                word_list = []
        logger.info(
            "{} Sense alphabet size: {}".format(input_file, self.syngaz_alphabet.size())
        )

    def build_gaz_alphabet(self, input_file, count=False):
        """Register every gazetteer entity matched in *input_file*.

        When *count* is True, greedily credit each longest match and remove
        all of its covered sub-spans so nested matches are not double counted.
        """
        logger.info("Build gaz alphabet from {}".format(input_file))

        with open(input_file, "r", encoding="utf-8") as fr:
            in_lines = fr.readlines()
        word_list = []
        for line in in_lines:
            if len(line):
                sent = line.strip().split("\t")[-1]
                sent = str2list(sent, self.words_longer_than_one)
                for word in sent:
                    if self.number_normalized:
                        word = normalize_word(word)
                    word_list.append(word)

                w_length = len(word_list)
                entitys = []
                for idx in range(w_length):
                    matched_entity = self.gaz.enumerateMatchList(word_list[idx:])
                    entitys += matched_entity
                    for entity in matched_entity:
                        self.gaz_alphabet.add(entity)
                        index = self.gaz_alphabet.get_index(entity)
                        # make sure every matched entity has a count slot
                        self.gaz_count[index] = self.gaz_count.get(index, 0)

                if count:
                    entitys.sort(key=lambda x: -len(x))
                    while entitys:
                        longest = entitys[0]
                        longest_index = self.gaz_alphabet.get_index(longest)
                        self.gaz_count[longest_index] = (
                            self.gaz_count.get(longest_index, 0) + 1
                        )

                        # drop every substring of the credited longest match
                        gazlen = len(longest)
                        for i in range(gazlen):
                            for j in range(i + 1, gazlen + 1):
                                covering_gaz = longest[i:j]
                                if covering_gaz in entitys:
                                    entitys.remove(covering_gaz)

                word_list = []
        print("gaz alphabet size:", self.gaz_alphabet.size())

    def fix_alphabet(self):
        """Close all alphabets so no new entries can be added."""
        self.word_alphabet.close()
        self.biword_alphabet.close()
        self.char_alphabet.close()
        self.label_alphabet.close()
        self.gaz_alphabet.close()

    def build_word_pretrain_emb(self, emb_path):
        """Load pretrained word embeddings for the word alphabet."""
        print("build word pretrain emb...")
        self.pretrain_word_embedding, self.word_emb_dim = build_pretrain_embedding(
            emb_path, self.word_alphabet, self.word_emb_dim, self.norm_word_emb
        )

    def build_biword_pretrain_emb(self, emb_path):
        """Load pretrained bigram embeddings for the biword alphabet."""
        print("build biword pretrain emb...")
        self.pretrain_biword_embedding, self.biword_emb_dim = build_pretrain_embedding(
            emb_path, self.biword_alphabet, self.biword_emb_dim, self.norm_biword_emb
        )

    def build_gaz_pretrain_emb(self, emb_path):
        """Load pretrained embeddings for the gazetteer alphabet."""
        print("build gaz pretrain emb...")
        self.pretrain_gaz_embedding, self.gaz_emb_dim = build_pretrain_embedding(
            emb_path, self.gaz_alphabet, self.gaz_emb_dim, self.norm_gaz_emb
        )

    def generate_instance_with_gaz(self, input_file, name):
        """Read *input_file* and store the instances on {name}_texts / {name}_Ids.

        :param input_file: corpus file to read
        :param name: one of "train", "dev", "test"
        """
        self.fix_alphabet()
        # deduplicated: the original repeated the identical call three times
        if name not in ("train", "dev", "test"):
            print(
                "Error: you can only generate train/dev/test instance! Illegal input:%s"
                % (name)
            )
            return
        texts, ids = read_instance_with_gaz(
            input_file,
            self.char_alphabet,
            self.word_alphabet,
            self.biword_alphabet,
            # ----
            self.gaz,
            self.gaz_alphabet,
            self.gaz_count,
            # ----
            self.label_alphabet,
            self.number_normalized,
            self.MAX_SENTENCE_LENGTH,
        )
        setattr(self, name + "_texts", texts)
        setattr(self, name + "_Ids", ids)