from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
from scipy.stats import rankdata
import pandas as pd
import numpy as np
import torch
import faiss
import json
import re

# ---------------------------------------------------------------------------
# Resource loading: knowledge base, user answers, embedding + reranker models.
# ---------------------------------------------------------------------------

# Knowledge base: sub-question id -> list of bonus points, each holding the
# similar keywords for that point (schema assumed from usage below — TODO
# confirm against the file).
# Binary mode lets json auto-detect UTF-8/16/32 instead of relying on the
# platform locale encoding; `with` guarantees the handle is closed.
data_path = r'100相似关键词_知识库'
with open(data_path, 'rb') as f:
    data = json.load(f)

# User answers: sub-question id -> list of per-user answers, each pre-split
# into fragments.
answer_path = r'用户回答'
with open(answer_path, 'rb') as f:
    answer = json.load(f)

# Local snapshot paths of the acge / m3e embedding models and the BGE reranker.
acge_path = r'models--aspire--acge_text_embedding\snapshots\16582d03bb81daeae334c3a73d388d270c46a32a'
m3e_path = r'm3e-base'
bge_reranker_path = r'models--BAAI--bge-reranker-base\snapshots\b97324c339751a2af21e0b191f0b5f6db9409312'

acge_model = SentenceTransformer(acge_path)
m3e_model = SentenceTransformer(m3e_path)
reranker_tokenizer = AutoTokenizer.from_pretrained(bge_reranker_path)
reranker_model = AutoModelForSequenceClassification.from_pretrained(bge_reranker_path)
# .cuda()/.eval() mutate the module in place and return it; `a` is kept only
# so the module namespace stays backward-compatible with the original script.
a = reranker_model.cuda()
a = reranker_model.eval()

# One-time setup (intentionally commented out): embed every knowledge-base
# entry and persist it to a per-question / per-point FAISS index on disk.
# idx: sub-question id
# for idx in data:
#     # point: 加分点
#     for point_idx, point in enumerate(data[idx]):
#         acge_embedding = acge_model.encode(point, normalize_embeddings=True)
#         m3e_embedding = m3e_model.encode(point, normalize_embeddings=True)

#         # 存入向量数据库中
#         index_acge = faiss.IndexFlatL2(acge_embedding.shape[1])
#         index_acge.add(acge_embedding)
#         faiss.write_index(index_acge, f'Database\question_s{idx}\point{point_idx+1}\\faiss_acge.index')
#         index_m3e = faiss.IndexFlatL2(m3e_embedding.shape[1])
#         index_m3e.add(m3e_embedding)
#         faiss.write_index(index_m3e, f'Database\question_s{idx}\point{point_idx+1}\\faiss_m3e.index')


# --------------------------------------------------------------------------------------------------------
# 推理部分


# Results table for score comparison: one row per (sub-question, user) pair,
# holding that user's total score on the sub-question.
dict_ = dict()
dict_['用户得分_x'] = []
df = pd.DataFrame(dict_)

def embeddings_load(path, needT=True):
    """Load every vector stored in a FAISS index file into memory.

    Args:
        path: Path to an index written with ``faiss.write_index``.
        needT: If True (default), return the transpose with shape
            (dim, num_vectors) so a query embedding can be matmul'd
            directly against it; otherwise shape (num_vectors, dim).

    Returns:
        float32 ``numpy.ndarray`` of the stored vectors.
    """
    index = faiss.read_index(path)
    # reconstruct_n extracts all vectors in a single native call, replacing
    # the original per-vector reconstruct() Python loop (and fixing the
    # `all_vectorse` typo).
    all_vectors = index.reconstruct_n(0, index.ntotal)
    return all_vectors.T if needT else all_vectors
    

# ---------------------------------------------------------------------------
# Inference: for each (sub-question, user) pair, retrieve the closest
# knowledge-base keywords for every answer fragment, rerank the top-5
# candidates with the BGE cross-encoder, and award a bonus point's score
# when the best rerank logit clears the threshold.
# ---------------------------------------------------------------------------

threshold = -0.3  # minimum reranker logit for a bonus point to be awarded

# Per-question scoring config (bonus-point count and the score of each point).
# Loop-invariant, so load it once up front instead of once per user; binary
# mode + `with` fixes the unclosed, locale-dependent open() of the original.
keywords_path = '关键词'
with open(keywords_path, 'rb') as f:
    keywords = json.load(f)

# question_idx: sub-question id
for question_idx in answer:
    # user_answer: one user's full answer to this sub-question (a list of
    # pre-split fragments)
    for user_answer in answer[question_idx]:
        question_score = 0  # this user's accumulated score on this sub-question
        # Iterate over every bonus point of this sub-question.
        for idx, keyword in enumerate(keywords[question_idx].values()):
            point_path = idx + 1  # 1-based directory suffix of the point's index
            key_score = float(list(keyword.values())[0])
            # Load both vector stores for this bonus point. Backslashes are
            # doubled so the strings are byte-identical at runtime but no
            # longer contain '\q'/'\p' invalid escape sequences.
            faiss_acge_path = f'Database\\question_s{question_idx}\\point{point_path}\\faiss_acge.index'
            faiss_m3e_path = f'Database\\question_s{question_idx}\\point{point_path}\\faiss_m3e.index'
            embeddings_acge = embeddings_load(faiss_acge_path)
            embeddings_m3e = embeddings_load(faiss_m3e_path)

            # Best rerank score for each answer fragment.
            point_list = []

            # cut_answer: one fragment of the user's answer
            for cut_answer in user_answer:
                acge_answer_embedding = acge_model.encode(cut_answer, normalize_embeddings=True)
                m3e_answer_embedding = m3e_model.encode(cut_answer, normalize_embeddings=True)
                score_acge = acge_answer_embedding @ embeddings_acge
                score_m3e = m3e_answer_embedding @ embeddings_m3e

                # Rank fusion of the two retrievers, then keep the 5 best
                # knowledge-base candidates.
                score = rankdata(score_acge) + rankdata(score_m3e)
                max_score_idxs = score.argsort()[-5:]

                # (fragment, keyword) pairs for cross-encoder reranking.
                pairs = [[cut_answer, data[question_idx][idx][kb_idx]]
                         for kb_idx in max_score_idxs]

                inputs = reranker_tokenizer(pairs, padding=True, truncation=True,
                                            return_tensors='pt', max_length=512)
                with torch.no_grad():
                    inputs = {key: inputs[key].cuda() for key in inputs}
                    scores = reranker_model(**inputs, return_dict=True).logits.view(-1, ).float()

                # BUG FIX: the original indexed `scores` (length <= 5) with a
                # knowledge-base index taken from `max_score_idxs`, which is
                # out of range whenever that index >= len(pairs). The intent
                # is simply the best rerank score for this fragment.
                point_list.append(scores.max().item())

            # Award the point when the best fragment clears the threshold.
            # The emptiness guard fixes an IndexError on users with an empty
            # answer list (original did point_list[-1] after sort()).
            if point_list and max(point_list) >= threshold:
                question_score += key_score
        df.loc[len(df)] = {'用户得分_x': question_score}