import torch
from transformers import BertTokenizer
from sentence_transformers import SentenceTransformer
import numpy as np
import faiss
import time
import json
from faiss import normalize_L2
# from my_py_toolkit.file.file_toolkit import *
  

def readjson(file_path):
    """Load and return the JSON content of ``file_path`` (read as UTF-8).

    Args:
      file_path: path to a JSON file.

    Returns:
      The deserialized Python object (dict, list, etc.).
    """
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)

def normalize(x, axis=-1):
    """L2-normalize ``x`` along ``axis``.

    Args:
      x: a torch tensor.
      axis: dimension along which to compute the L2 norm (default: last).

    Returns:
      A tensor of the same shape whose vectors along ``axis`` have unit
      Euclidean length.
    """
    # Tiny epsilon keeps zero vectors from dividing by zero.
    norms = torch.norm(x, 2, axis, keepdim=True).expand_as(x)
    return 1. * x / (norms + 1e-12)

def encode_text(model, texts):
    """Encode ``texts`` with ``model`` and return the embeddings as a tensor."""
    embeddings = model.encode(texts, convert_to_tensor=True)
    return embeddings

def load_model(model_path, model_cls, device):
    """Instantiate ``model_cls`` from ``model_path`` on the given ``device``."""
    model = model_cls(model_path, device=device)
    return model

def get_embeddings(model, source, device, encode_fn=encode_text):
    """Encode ``source`` texts and return L2-normalized embeddings.

    Args:
      model: the embedding model (e.g. a SentenceTransformer).
      source: list of texts to encode.
      device: kept for interface compatibility; the original only used it to
        allocate an empty scratch tensor that was concatenated away.
      encode_fn: callable ``(model, texts) -> tensor`` producing embeddings.

    Returns:
      numpy.ndarray of shape (len(source), dim) with unit-norm rows.
    """
    model.eval()
    print("进入内容搜索.............")
    with torch.no_grad():
        # The original concatenated onto a fresh empty tensor, which is a
        # no-op (and could crash on a device mismatch); encode directly.
        embeddings = encode_fn(model, source)
        print("get_embeddings")
        print(embeddings.shape)
        embeddings = normalize(embeddings)
        return embeddings.cpu().numpy()

class VectorSearch:
    """Dense vector retrieval over a precomputed embedding matrix.

    1. Load the embedding model.
    2. Build the retrieval index:
       2.1 load precomputed embeddings and their texts,
       2.2 index them with faiss (inner product over L2-normalized
           vectors, i.e. cosine similarity).
    3. Expose a ``search`` method.
    """

    def __init__(self, model_path, embedding_file, embedding_content_json_file,
                 device=None, model_cls=SentenceTransformer,
                 load_model_fn=load_model) -> None:
        """Load the model and build the faiss index.

        Args:
          model_path: path to the embedding model weights.
          embedding_file: ``.npy`` file with the precomputed embedding matrix.
          embedding_content_json_file: JSON list of texts, one per embedding row.
          device: torch device; auto-detected (cuda if available) when None.
          model_cls: model class to instantiate.
          load_model_fn: factory ``(path, cls, device) -> model``.
        """
        self.max_len = 32
        # BUG FIX: the device must be resolved BEFORE loading the model; the
        # original resolved it afterwards, so the model was always loaded
        # with device=None when the caller omitted it.
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        self.model = load_model_fn(model_path, model_cls, self.device)

        # Build the retrieval index from the precomputed embeddings.
        self.embedding = np.load(embedding_file).astype('float32')
        normalize_L2(self.embedding)  # in-place; makes inner product == cosine
        # embedding_content: list[str], one text per embedding row.
        self.embedding_content = readjson(embedding_content_json_file)
        # text -> row index, used to skip re-encoding already-indexed texts.
        self.embedding_content_mapping = {
            text: i for i, text in enumerate(self.embedding_content)
        }
        self.search_engine = faiss.IndexFlatIP(self.embedding.shape[1])
        # Already float32; the second astype the original did was redundant.
        self.search_engine.add(self.embedding)

    def search(self, text, topk=6, encode_fn=encode_text):
        """Return the ``topk`` indexed texts most similar to ``text``.

        Args:
          text: query string.
          topk: number of neighbours to retrieve.
          encode_fn: callable used to embed ``text`` when it is not already
            in the index.

        Returns:
          dict mapping rank (as str) to ``[matched_text, score_as_str]``.
          Single-character matches are filtered out, so fewer than ``topk``
          entries may be returned.
        """
        # Reuse the precomputed embedding when the query is already indexed;
        # otherwise encode it on the fly.
        row = self.embedding_content_mapping.get(text, -1)
        if row > -1:
            text_embeding = self.embedding[row:row + 1]
        else:
            text_embeding = get_embeddings(self.model, [text], self.device, encode_fn)

        print("-----------------------------------")
        start_r = time.time()
        distance, idx = self.search_engine.search(text_embeding, topk)
        end_r = time.time()
        print("向量检索时间：", str(end_r - start_r))
        print("distance: ", distance)
        print("idx: ", idx)

        search_res = {}
        for rank, item in enumerate(idx[0]):
            # faiss pads with -1 when fewer than topk results exist; the
            # original would have silently returned the LAST content entry.
            if item < 0:
                continue
            cur_search = self.embedding_content[item]
            # Drop single-character hits (noise in the vocabulary).
            if len(cur_search) > 1:
                search_res[str(rank)] = [cur_search, str(distance[0][rank])]

        return search_res



if __name__ == '__main__':
    # Demo: build the search index from hard-coded paths and run a few queries.
    model_path = '/home/centos/ll/simcse_V2/SimCSE-Chinese-Pytorch/roberta-wwm-ll'
    embedding_file = '/home/centos/ll/simcse_V2/fassis_2/fassis_/ask_embedding.npy'
    embedding_content_json_file = '/home/centos/ll/simcse_V2/fassis_2/fassis_/all_symtoms_10.json'
    searcher = VectorSearch(model_path, embedding_file, embedding_content_json_file)
    for query in ['感冒', '发烧', '头疼']:
        print(query)
        print(searcher.search(query))