# Path setup: make this script's directory and its parent importable so the
# sibling module `utils_common` resolves when the file is run as a script
# (rather than as part of an installed package).
import sys,os
current =  os.path.abspath(os.path.dirname(__file__))
current_parent = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(current)
sys.path.append(current_parent)

import numpy as np
# Project-local helper; presumably parses a pretrained-vector text file into a
# word -> vector mapping — verify against utils_common.
from utils_common import load_pretrain_word_vectors

class SogouSimilarity:
    """Word-similarity scorer backed by pretrained Sogou word vectors.

    Scores are raw dot products of embedding vectors (NOT cosine similarity —
    vectors are used as stored, without normalization).
    """

    def __init__(self, file_name, embeddings_map):
        # Name of the pretrained-embedding file (informational, used in __str__).
        self.file_name = file_name
        # Mapping: word (str) -> embedding vector (sequence of floats).
        self.embeddings_map = embeddings_map
        self.name = "SogouSimilarity"

    @classmethod
    def load(cls, pretrain_dir='../pre_train_embedding/sgns.sogou.char'):
        """Alternate constructor: load embeddings from a pretrained-vector file."""
        embeddings_map = load_pretrain_word_vectors(pretrain_dir)
        return cls(os.path.basename(pretrain_dir), embeddings_map)

    def similarity(self, word1, word2, split_mode=False):
        """Return the dot product of the two words' embedding vectors.

        If both words are in the vocabulary, their stored vectors are used.
        Otherwise:
          * split_mode=False: return 0.
          * split_mode=True: fall back to averaged per-character vectors
            (see `embed`); raises ValueError if a character is also missing.

        TODO: a smarter fallback could segment by vocabulary,
        e.g. 博士研究生 --> 博士 研究生.
        """
        word1, word2 = str(word1), str(word2)

        if word1 in self.embeddings_map and word2 in self.embeddings_map:
            emb1 = np.array(self.embeddings_map[word1], dtype=float)
            emb2 = np.array(self.embeddings_map[word2], dtype=float)
        elif not split_mode:
            return 0
        else:
            # embed(..., split_mode=True) already returns the stored vector
            # for in-vocabulary words, so one call covers both cases.
            emb1 = self.embed(word1, split_mode=True)
            emb2 = self.embed(word2, split_mode=True)

        return (emb1 @ emb2).item()

    def embed(self, word, split_mode=False):
        """Return the embedding for `word` as a float ndarray.

        For out-of-vocabulary words:
          * split_mode=True: average the per-character vectors; raises
            ValueError if any character is itself out of vocabulary.
          * split_mode=False: print a warning and return None.
        """
        if word in self.embeddings_map:
            return np.array(self.embeddings_map[word], dtype=float)
        if split_mode:
            # Validate first so the error names the exact missing character.
            for char in word:
                if char not in self.embeddings_map:
                    raise ValueError(f"{char} is NOT in pretrain embedding!")
            return sum(np.array(self.embeddings_map[char], dtype=float) for char in word) / len(word)
        print(f"{word} is NOT in pretrain embedding!")
        return None

    def __len__(self):
        return len(self.embeddings_map)

    def __str__(self):
        # Fix: previously probed self.embeddings_map["1"], which raised
        # KeyError whenever the word "1" was absent; inspect any entry instead.
        dims = len(next(iter(self.embeddings_map.values()))) if self.embeddings_map else 0
        return f"SogouSimilarity from {self.file_name} with {len(self)} words, {dims} dims."

    def __repr__(self):
        return self.__str__()



if __name__ == "__main__":
    scorer = SogouSimilarity.load()

    # Both words exist in the vocabulary: direct vector lookup.
    print(scorer.similarity("硕士", "博士"))

    # Out-of-vocabulary word without split_mode: warns and returns None.
    scorer.embed("硕士研究生")

    # Out-of-vocabulary words with split_mode: averaged character vectors.
    for left, right in [("硕士", "硕士研究生"), ("小学生", "硕士研究生")]:
        print(scorer.similarity(left, right, split_mode=True))

    




