import pandas as pd
import os
import torch
import faiss
from tqdm import tqdm
from dual_model import DualModel
from transformers import AutoTokenizer

"""
加载交互模型，进行精密排序
"""
from torch.nn import CosineSimilarity
from transformers import BertForSequenceClassification

os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "TRUE")


class LawSearchRobot(object):
    """Two-stage FAQ search engine for a law Q&A knowledge base.

    Stage 1 (recall): a dual (bi-)encoder embeds every knowledge-base
    question once; an incoming query is embedded the same way and the most
    similar questions are retrieved by cosine similarity.
    Stage 2 (re-rank): a cross-encoder scores each (query, candidate) pair
    jointly and the best-scoring candidate's answer is returned.
    """

    def __init__(self, knowledge_path, dual_model_path, cross_model_path, tokenizer_path, device):
        self.knowledge_path = knowledge_path
        self.device = device
        self.dual_model_path = dual_model_path
        self.cross_model_path = cross_model_path
        self.tokenizer_path = tokenizer_path
        self.load_everything()

    def load_knowledge(self, batch_size=32):
        """Read the knowledge-base CSV (column "title" = question, "reply" =
        answer) and embed every question with the dual model.

        Builds:
            self.torch_vectors: raw (un-normalized) question embeddings.
            self.faiss_vector_index: inner-product FAISS index over the
                L2-normalized embeddings (IP on unit vectors == cosine).

        Args:
            batch_size: number of questions encoded per forward pass.
        """
        # BUG FIX: the path was hard-coded to "law_faq.csv", silently
        # ignoring the knowledge_path passed to the constructor.
        self.knowledges = pd.read_csv(self.knowledge_path)
        self.question_data_list = self.knowledges["title"].tolist()
        self.answer_data_list = self.knowledges["reply"].tolist()
        vectors = []
        with torch.inference_mode():
            for start in tqdm(range(0, len(self.question_data_list), batch_size)):
                batch_questions = self.question_data_list[start:start + batch_size]
                tokenized = self.tokenizer(batch_questions, return_tensors="pt", max_length=128,
                                           truncation=True, padding=True)
                # BUG FIX: move tensors via self.device instead of relying on
                # the module-level global `device`.
                inputs = {k: v.to(self.device) for k, v in tokenized.items()}
                # Index [1] is the BERT pooler output: one vector per question.
                vectors.append(self.d_model.bert(**inputs)[1])
        all_vectors = torch.cat(vectors, dim=0).cpu()
        self.torch_vectors = all_vectors
        # Copy before normalize_L2: it modifies the array in place, and
        # .numpy() would otherwise share memory with self.torch_vectors.
        index_vectors = all_vectors.numpy().copy()
        # BUG FIX: infer the embedding size instead of hard-coding 1024.
        faiss_index = faiss.IndexFlatIP(index_vectors.shape[1])
        faiss.normalize_L2(index_vectors)
        faiss_index.add(index_vectors)
        self.faiss_vector_index = faiss_index
        # Backward-compatible alias for the original (misspelled) attribute.
        self.fasiss_vector_indexs = faiss_index

    # Recall method 1: FAISS index search.
    def top_from_faiss(self, search_vector, top_k=4):
        """Return (scores, indices) of the top_k knowledge-base questions
        most similar to a (1, dim) float32 numpy query vector.

        NOTE: normalize_L2 modifies search_vector in place.
        """
        faiss.normalize_L2(search_vector)
        top_values, top_indices = self.faiss_vector_index.search(search_vector, top_k)
        return top_values[0], top_indices[0]

    # Recall method 2: dense cosine similarity in torch.
    def top_from_torch_cosine_similarity(self, vectors, search_vector, top_k=4):
        """Return (scores, indices) of the top_k rows of `vectors` most
        similar to the (1, dim) tensor `search_vector`, as numpy arrays.
        """
        # BUG FIX: use self.device rather than the module-level global.
        similarities = CosineSimilarity()(vectors.to(self.device), search_vector.to(self.device))
        # Clamp so a large top_k cannot crash torch.topk.
        top_k = min(top_k, int(similarities.shape[0]))
        top_values, top_indices = torch.topk(similarities, top_k, dim=0)
        return top_values.cpu().numpy(), top_indices.cpu().numpy()

    def load_dual_model(self):
        """Load the dual (bi-)encoder used for candidate recall."""
        self.d_model = DualModel.from_pretrained(self.dual_model_path).to(self.device)
        self.d_model.eval()

    def load_cross_model(self):
        """Load the cross-encoder used for fine-grained re-ranking."""
        self.c_model = BertForSequenceClassification.from_pretrained(self.cross_model_path).to(self.device)
        self.c_model.eval()

    def load_tokenizer(self):
        """Load the shared tokenizer (used by both models)."""
        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)

    def load_everything(self):
        """Load tokenizer and models, then build the knowledge index (the
        index build requires the tokenizer and dual model to exist first)."""
        self.load_tokenizer()
        self.load_dual_model()
        self.load_cross_model()
        self.load_knowledge()

    def question_handler(self, question="寻衅滋事"):
        """Embed one query string with the dual model.

        Returns:
            A tuple of the (1, dim) query embedding as a torch tensor on
            self.device and as a numpy array on the CPU.
        """
        with torch.inference_mode():
            encoded = self.tokenizer(question, return_tensors="pt", max_length=128,
                                     truncation=True, padding=True)
            encoded = {k: v.to(self.device) for k, v in encoded.items()}
            torch_question_vector = self.d_model.bert(**encoded)[1]
        return torch_question_vector, torch_question_vector.cpu().numpy()

    def do_search(self, question, top_k=10):
        """Answer `question`: recall the top_k most similar knowledge-base
        questions by cosine similarity, re-rank them with the cross-encoder,
        and print the best-matching question/answer pair.
        """
        print("开始推理 问题 {}".format(question))
        torch_question_vector, _ = self.question_handler(question=question)
        _, top_indices = self.top_from_torch_cosine_similarity(self.torch_vectors,
                                                               torch_question_vector,
                                                               top_k=top_k)

        candidate_questions = [self.question_data_list[i] for i in top_indices]
        queries = [question] * len(candidate_questions)
        inputs = self.tokenizer(queries, candidate_questions, return_tensors="pt", padding=True,
                                max_length=128, truncation=True)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        with torch.inference_mode():
            # NOTE(review): assumes the cross model emits a single logit per
            # pair, so squeeze() yields one score per candidate and argmax
            # picks the best candidate index — confirm num_labels == 1.
            logits = self.c_model(**inputs).logits.squeeze()
            best = torch.argmax(logits, dim=-1)

        candidate_answers = [self.answer_data_list[i] for i in top_indices]
        best_idx = int(best.item())
        print("查询问题:{}".format(question))
        print("知识库问题:{}".format(candidate_questions[best_idx]))
        print("知识库答案:{}".format(candidate_answers[best_idx]))



knowledge_path = "law_faq.csv"
dual_model_path = "/data/logs/qq_search_robot/dual_model/checkpoint-19000"
cross_model_path = "/data/logs/qq_search_robot/cross_model/checkpoint-14250"
tokenizer_path = '/data/models/huggingface/chinese-macbert-large'
device = "cuda"
robot = LawSearchRobot(knowledge_path, dual_model_path, cross_model_path, tokenizer_path, device)
question = "过失杀人"
robot.do_search(question, top_k=30)
