#!/usr/bin/env python
# coding: utf-8
import json
import pickle
import re
import torch
from tqdm import tqdm
from langchain.vectorstores import FAISS
from transformers import AutoTokenizer, AutoModelForSequenceClassification

from data_process import DataProcess, BM25
from llm_model import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings


class Config(object):
    """Static configuration for the RAG question-answering pipeline.

    All values are class attributes read directly (no instantiation logic);
    the `__main__` block creates one instance and may override fields on it.
    """
    # Source PDF with the training data (absolute Windows path — TODO: make configurable)
    data_path: str = 'D:/project/私人调研/python/coggle/coggle/coggle_202401/data/初赛训练数据集.pdf'
    # JSON file with the questions to answer
    question_path: str = 'D:/project/私人调研/python/coggle/coggle/coggle_202401/data/questions.json'
    # Where the final answers are written
    result_save: str = 'result/submit.json'
    # Number of candidates retrieved per retriever (dense and BM25 each)
    rag_k: int = 15
    # If True, rebuild the vector store and BM25 index from the PDF;
    # if False, load the previously persisted ones from disk
    reload_db: bool = True
    # Local paths of the embedding / rerank models
    bge = 'D:/env/bert_model/BAAI/bge-large-zh-v1.5'
    bge_rerank = 'D:/env/bert_model/BAAI/bge-reranker-base'
    gte = 'D:/bert/thenlper/gte-large-zh'  # alternate embedding model (currently unused)
    # Persistence locations for the indexes
    bge_vector_store: str = 'data/bge_vector'
    gte_vector_store: str = 'data/gte_vector'
    bm25_store: str = 'data/bm25.pkl'
    # LLM endpoint (Zhipu/GLM chat completions API)
    llm_url: str = 'https://open.bigmodel.cn/api/paas/v4/chat/completions'
    # SECURITY: hardcoded API credential committed in source — rotate this key
    # and load it from an environment variable or secrets store instead.
    llm_api_key: str = '83e5bc58555d8bac289e27bac50f8afc.Khk1JjCxb8MJN8Mi'


if __name__ == '__main__':
    config = Config()

    # Pick GPU when available so the script also runs on CPU-only machines
    # (the original unconditional .cuda() crashed without a GPU).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Embedding model used for dense retrieval.
    bge_embed = HuggingFaceEmbeddings(model_name=config.bge)
    # gte_embed = HuggingFaceEmbeddings(model_name='D:/bert/thenlper/gte-large-zh')

    if config.reload_db:
        # Read the PDF and chunk it at two granularities (size, overlap) so
        # retrieval can match both long and short passages.
        process = DataProcess(config.data_path)
        process.gen_docs(512, 200)
        process.gen_docs(256, 100)

        # Build and persist the dense FAISS index.
        bge_vector_store = FAISS.from_documents(process.docs, bge_embed)
        # gte_vector_store = FAISS.from_documents(process.docs, gte_embed)
        bge_vector_store.save_local(config.bge_vector_store)
        # gte_vector_store.save_local(config.gte_vector_store)

        # Build and persist the sparse BM25 index.
        bm25 = BM25(process.docs)
        with open(config.bm25_store, 'wb') as file:
            pickle.dump(bm25, file)
    else:
        # Reload the previously persisted indexes.
        bge_vector_store = FAISS.load_local(config.bge_vector_store, bge_embed)
        # gte_vector_store = FAISS.load_local(config.gte_vector_store, gte_embed)
        # SECURITY: pickle.load executes arbitrary code — only load files this
        # script produced itself, never an untrusted bm25.pkl.
        with open(config.bm25_store, 'rb') as file:
            bm25 = pickle.load(file)

    # Cross-encoder rerank model (scores (query, passage) pairs jointly).
    tokenizer = AutoTokenizer.from_pretrained(config.bge_rerank)
    rerank_model = AutoModelForSequenceClassification.from_pretrained(config.bge_rerank)
    rerank_model.to(device)
    rerank_model.eval()

    # Load the questions; use a context manager so the file handle is closed
    # (the original json.load(open(...)) leaked it).
    with open(config.question_path, 'r', encoding='utf-8') as fp:
        questions = json.load(fp)

    # prompt
    prompt = """请你基于以下材料回答用户问题。回答要清晰准确，包含正确关键词。不要胡编乱造。如果所给材料与用户问题无关，只输出：无答案。\n
【材料】
{}
【用户问题】
{}
务必注意，如果所给材料无法回答用户问题，只输出无答案，不要自己回答。"""

    llm = LLM(config.llm_url, config.llm_api_key)
    answer_list = []
    for question in tqdm(questions):
        query = question['question']
        # Dense retrieval.
        bge_docs = bge_vector_store.similarity_search(query, k=config.rag_k)
        # gte_docs = gte_vector_store.similarity_search(query, k=config.rag_k)
        # Sparse retrieval.
        bm25_docs = bm25.search_top_k(query, k=config.rag_k)

        # Merge both candidate lists and rerank them with the cross-encoder.
        docs = bge_docs + bm25_docs
        pairs = [[query, doc.page_content] for doc in docs]
        inputs = tokenizer(pairs, padding=True, truncation=True,
                           return_tensors='pt', max_length=512)
        with torch.no_grad():
            inputs = {key: value.to(device) for key, value in inputs.items()}
            scores = rerank_model(**inputs, return_dict=True).logits.view(-1, ).float()
        scores = scores.cpu().numpy()

        # Keep the 5 highest-scoring documents.
        # (An earlier comment said "three" — the code has always taken 5.)
        sorted_with_index = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
        top_5_index = [index for index, value in sorted_with_index[:5]]

        # Assemble the reference material; the top-ranked document's page is
        # reported as the citation. Empty default avoids an IndexError when
        # retrieval returns nothing.
        reference = ''
        max_page = ''
        for i, index in enumerate(top_5_index):
            doc = docs[index]
            if i == 0:
                max_page = 'page_{}'.format(doc.metadata['page_count'])
            text = '【第{}页 {}】{}'.format(doc.metadata['page_count'], doc.metadata['header'], doc.page_content)
            reference += '材料{}：\n{}\n'.format(i + 1, text)

        # Ask the LLM and collect the answer with its page reference.
        resp = llm.get_llm(prompt.format(reference, query))['choices'][0]['message']['content']
        answer_list.append({'question': query, 'answer': resp, 'reference': max_page})

    with open(config.result_save, 'w', encoding='utf8') as up:
        json.dump(answer_list, up, ensure_ascii=False, indent=4)