import time, pickle
import torch
from tqdm import tqdm
import numpy as np
from sklearn.preprocessing import normalize
import pickle

from utils_fasttext import biGramHash, triGramHash
from toolkits import similarity_by_cilin, similarity_by_sogou

UNK, PAD = '<UNK>', '<PAD>'  # special vocab tokens: out-of-vocabulary and padding

class TokenizerWrapper():
    """Map text to fixed-length token-id sequences.

    Wraps a vocab dict (token -> id) and a tokenizer callable. `__call__`
    pads/truncates every line to `pad_size`, optionally returns torch
    tensors, and can emit extra bigram/trigram hash features for FastText.
    """
    def __init__(self, vocab, tokenizer):
        self.vocab = vocab          # dict: token -> integer id; must contain UNK and PAD
        self.tokenizer = tokenizer  # callable: str -> list of tokens

    @classmethod
    def load(cls, config, args):
        """Build a wrapper from the pickled vocab at `config.vocab_path`.

        Side effect: sets `config.n_vocab` to the vocabulary size.
        NOTE(review): pickle.load is unsafe on untrusted files -- only load
        vocab files produced by this project.
        """
        with open(config.vocab_path, 'rb') as f1:
            vocab = pickle.load(f1)
        tokenizer = lambda x: [i for i in x]  # character-level segmentation
        config.n_vocab = len(vocab)
        return cls(vocab=vocab, tokenizer=tokenizer)

    def parse(self, text):
        """Tokenize only (no id mapping)."""
        return self.tokenizer(text)

    def __call__(self, texts, pad_size=32, return_tensors=None, args=None):
        """Convert an iterable of strings into padded token-id sequences.

        Args:
            texts: iterable of strings.
            pad_size: fixed output length per line (pad with PAD / truncate).
            return_tensors: "pt" for torch.LongTensor output; "tf" passes the
                assert but currently falls through to the plain-list path --
                TODO implement or reject it explicitly.
            args: optional namespace; when args.model == "FastText", per-line
                bigram/trigram hash features are added (uses args.n_gram_vocab).

        Returns:
            list of id lists (or LongTensor when return_tensors == "pt"), or
            for FastText a list of (ids, seq_len, bigram, trigram) tuples /
            a 4-tuple of LongTensors.
        """
        if return_tensors:
            assert return_tensors in ["pt", "tf"]
        tokens_all = [self.parse(line) for line in texts]
        unk_idx = self.vocab.get(UNK)  # hoisted: one lookup instead of one per token
        tokens_idx_all = [[self.vocab.get(token, unk_idx) for token in tokens]
                          for tokens in tokens_all]
        # FIX: cap the recorded length at pad_size so seq_len stays consistent
        # with the truncated sequences below (the original kept the raw length,
        # which could exceed pad_size and break downstream length masking).
        tokens_lens = [min(len(tokens), pad_size) for tokens in tokens_all]

        pad_idx = self.vocab.get(PAD)
        for i, ids in enumerate(tokens_idx_all):
            if len(ids) < pad_size:
                ids.extend([pad_idx] * (pad_size - len(ids)))
            else:
                tokens_idx_all[i] = ids[:pad_size]

        if args and args.model == "FastText":
            # Each sample: (word ids, seq_len, bigram hashes, trigram hashes).
            fasttext_lines = []
            buckets = args.n_gram_vocab
            for tokens, seq_len in zip(tokens_idx_all, tokens_lens):
                bigram = [biGramHash(tokens, i, buckets) for i in range(pad_size)]
                trigram = [triGramHash(tokens, i, buckets) for i in range(pad_size)]
                fasttext_lines.append((tokens, seq_len, bigram, trigram))

            if return_tensors == "pt":
                tokens_tensor = torch.LongTensor([item[0] for item in fasttext_lines])
                seq_len_tensor = torch.LongTensor([item[1] for item in fasttext_lines])
                bigram_tensor = torch.LongTensor([item[2] for item in fasttext_lines])
                trigram_tensor = torch.LongTensor([item[3] for item in fasttext_lines])
                return (tokens_tensor, seq_len_tensor, bigram_tensor, trigram_tensor)
            return fasttext_lines

        return torch.LongTensor(tokens_idx_all) if return_tensors == "pt" else tokens_idx_all


class SimilarityWrapper():
    def __init__(self, Similarity, words_list=None, simi_matrix=None):
        self.Similarity = Similarity
        self.model_name = Similarity.name
        self.similarity = Similarity.similarity
        self.words_list = words_list
        self.simi_matrix = simi_matrix
        
    @classmethod
    def load(cls, model_name="CilinSimilarity", path="toolkits/_data/cilin.txt"):
        assert model_name in ["CilinSimilarity", "SogouSimilarity"]
        if model_name == "CilinSimilarity":
            return cls(similarity_by_cilin.CilinSimilarity.load(path))
        elif model_name == "SogouSimilarity":
            return cls(similarity_by_sogou.SogouSimilarity.load(path))

    @classmethod
    def load_from_pkl(cls, path_pkl="CilinSimilarity_v1.0.pkl", path_dict="toolkits/_data/cilin.txt"):
        with open(path_pkl, "rb") as f1:
            model_name, words_list, simi_matrix = pickle.load(f1)
        
        if model_name == "CilinSimilarity":
            return cls(similarity_by_cilin.CilinSimilarity.load(path_dict),
                        words_list=words_list,
                        simi_matrix=simi_matrix)
        elif model_name == "SogouSimilarity":
            return cls(similarity_by_sogou.SogouSimilarity.load(path_dict),
                        words_list=words_list,
                        simi_matrix=simi_matrix)

    def set_simi_mode(self, mode="similarity"):
        """设置计算相似度的模式. 
        """
        if self.model_name == "CilinSimilarity":
            assert mode in ["similarity", "sim2013", "sim2016"]
            if mode == "similarity": self.similarity = self.Similarity.similarity
            elif mode == "sim2013": self.similarity = self.Similarity.sim2013
            else: self.similarity = self.Similarity.sim2016

        elif self.model_name == "SogouSimilarity":
            assert mode in ["similarity"]
            if mode == "similarity": self.similarity = self.Similarity.similarity

        else:
            raise ValueError(f"Current model {self.model_name} DONT support {mode}.")

    def build_simi_matrix(self, words_list, cache_it=False):
        """计算给定word list词语之间的相似度.
        """
        assert type(words_list) == list
        simi_matrix = np.zeros((len(words_list), len(words_list)), dtype=float)
        # diagonal_idx = list(range((len(words_list))))
        # simi_matrix[diagonal_idx,diagonal_idx] = 1

        pbar = tqdm(total = (len(words_list)*(len(words_list) - 1)) // 2, desc="calculating similarity matrix")
        # 下三角
        for i in range(len(words_list)):
            for j in range(0, i):
                simi_matrix[i,j] = self.similarity(words_list[i], words_list[j])
                pbar.update()
        
        if cache_it: self.words_list, self.simi_matrix = words_list, simi_matrix
        return simi_matrix

    def get_simi_words(self, word, similarity_threhold = 0.5):
        """基于simi_matrix获取大于相似阈值(区间为[0,1])的所有词.
        """
        if self.simi_matrix is None:
            print(f"Must call build_simi_matrix(, cache_it=True) first.")
            return
        if word not in self.words_list:
            raise ValueError(f"{word} is NOT in current words list.")
        
        target_idx = self.words_list.index(word)
        # 按行归一化
        simi_matrix_normed = normalize(self.simi_matrix, axis=1, norm='max')
        # print(simi_matrix_normed)
        sim_threhold_matrix = np.where(simi_matrix_normed >= similarity_threhold, 1, 0)
        target_row = sim_threhold_matrix[target_idx]
        target_simi_words = [self.words_list[int(i)] for i in target_row.nonzero()[0]]
        return target_simi_words

    def save_pkl(self, path="./", version="v1.0"):
        content = (self.model_name, self.words_list, self.simi_matrix)
        file_name = f"{path}/{self.model_name}_{version}.pkl"
        with open(file_name, "wb") as f1:
            pickle.dump(content, f1)
        print(f"Saved to {file_name}")

    def __str__(self,):
        return str(self.Similarity)

    def __repr__(self):
        return self.__str__()

    def __len__(self,):
        return len(self.Similarity)

if __name__ == "__main__":
    # Smoke test: score two words under every Cilin similarity mode, then
    # build a tiny similarity matrix and query it for neighbors.
    word_a = "硕士"
    word_b = "博士"
    wrapper = SimilarityWrapper.load(model_name="CilinSimilarity", path="toolkits/_data/cilin.txt")

    # Default mode first, then the two alternative Cilin scoring modes.
    print(wrapper.similarity(word_a, word_b))
    for mode in ("sim2013", "sim2016"):
        wrapper.set_simi_mode(mode)
        print(wrapper.similarity(word_a, word_b))

    wrapper.build_simi_matrix(["硕士", "博士", "汽车"], cache_it=True)
    print(wrapper.get_simi_words("硕士", similarity_threhold = 0.5))
