import pandas as pd
import os
from tqdm import tqdm
from dual_model import DualModel
from transformers import AutoTokenizer

os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "TRUE")


# All models run on GPU; downstream code moves every tensor to this device.
device = "cuda"
"""
1、读取知识库，该文件分为title 和 reply  ，title是问题、reply是答案
"""
# Knowledge base CSV: column `title` is the question, `reply` is the answer.
data = pd.read_csv("law_faq.csv")
# Preview the first rows of the knowledge base.
print(data.head())
"""
2、加载训练好的模型
"""
# Fine-tuned dual-encoder (bi-encoder) checkpoint used for vector recall.
train_model_dir = "D:/code/logs/dual_model/checkpoints/checkpoint-1000"
d_model = DualModel.from_pretrained(train_model_dir)
d_model = d_model.to(device)
# Inference only: disable dropout etc.
d_model.eval()
print("向量匹配模型加载完毕")
"""
3、加载tokenizer
"""
# Tokenizer comes from the base pretrained model, not the fine-tuned checkpoint.
pre_train_model_path = '/data/models/huggingface/chinese-macbert-base'
"""
4、加载词嵌入模型
"""
tokenizer = AutoTokenizer.from_pretrained(pre_train_model_path)
"""
5、转向量 进行qq匹配，取得 law_faq 的question部分
"""

import torch

# Encode every knowledge-base question into a sentence vector (the pooler
# output of the dual-encoder's BERT) in mini-batches of `batch_size`.
batch_size = 32
question_data_list = data["title"].tolist()
vectors = []
with torch.inference_mode():
    for start in tqdm(range(0, len(question_data_list), batch_size)):
        batch_questions = question_data_list[start:start + batch_size]
        # Tokenizer output keys: input_ids / token_type_ids / attention_mask,
        # each of shape (batch, seq_len) after padding/truncation to <=128.
        encoded = tokenizer(batch_questions, return_tensors="pt",
                            max_length=128, truncation=True, padding=True)
        inputs_tensors = {k: v.to(device) for k, v in encoded.items()}
        # Index [1] of the BERT output is the pooler output:
        # one (hidden_size,) vector per question.
        batch_vectors = d_model.bert(**inputs_tensors)[1]
        # Move each batch to CPU immediately so the whole corpus of vectors
        # does not accumulate in GPU memory during the loop.
        vectors.append(batch_vectors.cpu())

# All knowledge-base question vectors stacked into one (N, hidden_size)
# CPU tensor; this is the recall index for cosine-similarity search.
torch_vectors = torch.concat(vectors, dim=0)

from torch.nn import CosineSimilarity


def top_from_torch_cosine_similarity(vectors, search_vector, top_k=4):
    """Return the top_k rows of `vectors` most similar to `search_vector`.

    Args:
        vectors: (N, D) tensor of candidate embeddings.
        search_vector: (1, D) query embedding (broadcast over the N rows),
            on the same device as `vectors`.
        top_k: number of results to return.

    Returns:
        (values, indices): numpy arrays of length top_k — cosine
        similarities in descending order and their row indices.
    """
    # Cosine similarity along the feature dimension (dim=1). Computed on
    # whatever device the inputs already live on: the previous version
    # forced `.to(device)` with the global `device="cuda"`, which was
    # redundant (both call sites already move tensors to the device) and
    # broke on CPU-only machines.
    cos_output = CosineSimilarity(dim=1)(vectors, search_vector)
    # Largest similarities first.
    top_values, top_indices = torch.topk(cos_output, top_k, dim=0)
    return top_values.cpu().numpy(), top_indices.cpu().numpy()


def question_handler(question="寻衅滋事"):
    """Embed a single query string with the dual-encoder.

    Returns a pair (torch_vector, numpy_vector): the (1, hidden_size)
    pooler output living on `device`, and the same vector copied to a
    CPU numpy array (the form the faiss index expects).
    """
    with torch.inference_mode():
        encoded = tokenizer(question, return_tensors="pt",
                            max_length=128, truncation=True, padding=True)
        on_device = {key: tensor.to(device) for key, tensor in encoded.items()}
        # Index [1] of the BERT output is the pooler output.
        vector = d_model.bert(**on_device)[1]
    return vector, vector.cpu().numpy()

# Example query to retrieve against the knowledge base.
question="三方交通事故"
k = 10  # number of nearest neighbours to retrieve
torch_question_vector, numpy_question_vector = question_handler(question=question)
# Method 1: brute-force cosine similarity with torch over all corpus vectors.
top_values, top_indices = top_from_torch_cosine_similarity(torch_vectors.to(device), torch_question_vector.to(device),
                                                     top_k=k)
print("top_from_torch_cosine_similarity",top_values, top_indices)
"""
计算余弦相似度的方法 1 
"""
import faiss

# Method 2: faiss inner-product index. NOTE: `vectors` is rebound here from
# the list of batch tensors to a single (N, 768) numpy array.
vectors = torch.concat(vectors, dim=0).cpu().numpy()
# 768 = hidden size of the BERT encoder producing these embeddings.
fasiss_vector_indexs = faiss.IndexFlatIP(768)
# In-place L2 normalization; inner product on unit vectors == cosine similarity.
faiss.normalize_L2(vectors)
fasiss_vector_indexs.add(vectors)


def top_from_faiss(fasiss_vector_indexs, search_vector, top_k=4):
    """Query the faiss inner-product index for the top_k nearest questions.

    NOTE: `search_vector` (a (1, D) numpy array) is L2-normalized IN
    PLACE, so that inner product equals cosine similarity against the
    normalized index.

    Returns (scores, indices), each a 1-D array of length top_k.
    """
    faiss.normalize_L2(search_vector)
    scores, ids = fasiss_vector_indexs.search(search_vector, top_k)
    # search() returns one row per query; we only ever pass a single query.
    return scores[0], ids[0]


"""
[0.9519276 0.9507943 0.944895  0.9408473] [ 9828  1610 13298  1266]
[[0.9519276  0.95079416 0.9448951  0.9408473 ]] [[ 9828  1610 13298  1266]]
['交通事故逃逸', '捷信真他妈的高利贷', '黑*江*集*长*煤*拖欠工资', '高利贷不还会被拉黑']
"""
# Recall stage: fetch the k candidate questions from the faiss index.
# NOTE: this L2-normalizes numpy_question_vector in place inside top_from_faiss.
top_values, top_indices = top_from_faiss(fasiss_vector_indexs, numpy_question_vector, k)  # search a single vector (as a batch of one)
print("top_from_faiss",top_values, top_indices)
# Show the recalled candidate questions.
print([question_data_list[i] for i in top_indices])

"""
加载交互模型，进行精密排序
"""
# Rerank stage: a cross-encoder scores each (query, candidate) pair jointly.
from transformers import BertForSequenceClassification
cross_model_dir = "D:/code/logs/inter_trainer/checkpoints/checkpoint-2500"
cross_model =  BertForSequenceClassification.from_pretrained(cross_model_dir)
cross_model = cross_model.to(device)
print("加载交互模型")

# Candidate questions recalled by faiss, paired with the original query.
candidate_questions = [question_data_list[i] for i in top_indices]
ques =  [question] * len(candidate_questions)
# print(ques)
# print(cadidate)
# Sentence-pair encoding: (query, candidate) per row, so the cross-encoder
# attends over both texts at once.
inputs =  tokenizer(ques,candidate_questions,return_tensors="pt",padding=True,max_length=128,truncation=True)
inputs =  {k:v.to(device) for k,v in inputs.items()}
with torch.inference_mode():
    # NOTE(review): squeeze + argmax + .item() assumes the checkpoint was
    # trained with num_labels=1 (one relevance logit per pair, squeezed to
    # shape (k,)). With num_labels>=2 the argmax would be per-pair class ids
    # and .item() would raise — confirm the checkpoint config.
    logits =  cross_model(**inputs).logits.squeeze()
    result= torch.argmax(logits,dim=-1)
# Print the best-matching knowledge-base question.
print(candidate_questions[int(result.item())])