# from transformers import AutoTokenizer, AutoModel
# import torch
# import torch.nn.functional as F

# #Mean Pooling - Take attention mask into account for correct averaging
# def mean_pooling(model_output, attention_mask):
#     token_embeddings = model_output[0] #First element of model_output contains all token embeddings
#     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
#     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# # sentences = [
# #     "Why can't I see the credit limit?",
# #     "Why do I have no credit limit even though I opened the account?",
# #     "I borrowed money, but it hasn't been approved yet, can I cancel it?",
# #     "Why do I keep getting that page when I apply for a line of credit and enter my password?",
# # ]

# # Sentences we want sentence embeddings for
# sentences = ['This is an example sentence', 'Each sentence is converted']

# model_dir = '/home/tanchenwei/all-mpnet-base-v2'

# # Load model from HuggingFace Hub
# tokenizer = AutoTokenizer.from_pretrained(model_dir)
# model = AutoModel.from_pretrained(model_dir)

# # Tokenize sentences
# encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# # Compute token embeddings
# with torch.no_grad():
#     model_output = model(**encoded_input)

# # Perform pooling
# sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

# # Normalize embeddings
# sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)

# print("Sentence embeddings:")
# print(sentence_embeddings)


import os
from pathlib import Path
import re
import numpy as np
from sentence_transformers import SentenceTransformer, util
from YouDao import yd_translate
from functional import seq
import rich


class SemanticSearch:
    """Semantic similarity search over a precomputed question database.

    Embeddings are produced by a local sentence-transformers model
    (https://huggingface.co/sentence-transformers/all-mpnet-base-v2) and
    compared against precomputed question embeddings with cosine similarity.
    """

    # CJK Unified Ideographs range — used to detect Chinese input.
    _CHINESE_RE = re.compile(r'[\u4e00-\u9fa5]')
    # Leading digits of a question title, e.g. "834. ..." -> "834".
    _LEADING_NUMBER_RE = re.compile(r'^(\d+)')

    def __init__(self, model_dir = '/home/tanchenwei/all-mpnet-base-v2', db_path = 'lcq'):
        """Load the question records, their embeddings, and the encoder model.

        Args:
            model_dir: local directory containing the sentence-transformers model.
            db_path: basename of the database files; expects ``db_path + '.pkl'``
                (pickled question records, first element of each record is the
                question title) and ``db_path + '.npy'`` (embedding matrix).
        """
        # NOTE(review): allow_pickle on np.load executes pickle — only safe for
        # trusted local files, which these are assumed to be.
        self.lcq_text = np.load(db_path + '.pkl', allow_pickle=True)
        self.lcq_questions = np.array([record[0] for record in self.lcq_text])
        # Map a question's leading number (e.g. "834") to its full title.
        self.lcq_question_numbers = {}
        for question in self.lcq_questions:
            mres = self._LEADING_NUMBER_RE.match(question)
            if mres:
                self.lcq_question_numbers[mres.group(1)] = question
        self.lcq_embeddings = np.load(db_path + '.npy')
        self.model = SentenceTransformer(model_dir, device='cpu')

    def get_sentence_embeddings(self, sentences):
        """Encode one sentence or a list of sentences into embedding vectors."""
        return self.model.encode(sentences)

    def get_similarity_score(self, sentences):
        """Return cosine similarities between `sentences` and every DB question.

        Args:
            sentences: a single string or a list of strings.

        Returns:
            A NumPy array of shape (len(sentences), n_questions).
        """
        if isinstance(sentences, str):
            sentences = [sentences]
        target_embeddings = self.get_sentence_embeddings(sentences)
        cosine_scores = util.pytorch_cos_sim(target_embeddings, self.lcq_embeddings)
        return cosine_scores.numpy()

    def search(self, text) -> np.ndarray:
        """Return all question titles ranked by similarity to `text`, best first.

        Chinese input is machine-translated to English first, since the
        model/database are English.
        """
        # Detect Chinese characters; translate before encoding if present.
        if self._CHINESE_RE.search(text):
            text = yd_translate(text)
        scores = self.get_similarity_score(text)[0]
        ranked = scores.argsort()[::-1]
        return self.lcq_questions[ranked]

def test():
    """Search once with a colloquial Chinese paraphrase and dump results to output.txt."""
    searcher = SemanticSearch()
    # Colloquial Chinese restatement of LeetCode problem 834's statement.
    query = "想象一下，我们有一棵大树，这棵树上挂着很多果子，果子上都标了号，从0一直到n-1。这棵树的枝条把果子都连在了一起，而且每个果子都只有一条直接的路能走到其他任何果子那儿。现在，给你一个数字n，告诉你这棵树上有多少个果子，还有一个列表，叫做edges。这个列表里每一项都是一对果子的编号，比如[ai, bi]，这就意味着ai号和bi号的果子之间有一条直接的枝条相连。"
    hits = searcher.search(query)
    Path("output.txt").write_text("\n".join(hits))

def t_main():
    """Interactive loop: direct lookup by question number, else semantic search."""
    searcher = SemanticSearch()
    while True:
        query = input("输入问题：")
        # A bare number that matches a known question is a direct lookup.
        if query.isdigit() and query in searcher.lcq_question_numbers:
            print(searcher.lcq_question_numbers[query])
            continue
        # Otherwise rank by similarity and show the top 20 matches.
        for hit in searcher.search(query)[:20]:
            rich.print(f"[green]- [/green] {hit}")

if __name__ == '__main__':
    # Run from the script's own directory so the relative database paths
    # ('lcq.pkl' / 'lcq.npy') resolve regardless of the caller's cwd.
    os.chdir(Path(__file__).parent)
    t_main()
