from langchain.document_loaders import TextLoader,JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from faissManager import FAISS
from sent2vec import Sent2VecEmbeddings
from langchain.document_transformers import (
    LongContextReorder,
)
from dataclasses import dataclass
# @dataclass
# class template:

class retriver:
    """FAISS-backed retriever for RAG-style prompt augmentation.

    Loads a local FAISS index whose queries are embedded by
    Sent2VecEmbeddings, then serves either raw retrieved document texts
    (``only_retriever=True``) or a knowledge-augmented prompt.

    NOTE(review): the class keeps its original (misspelled) public name
    ``retriver`` so existing callers continue to work.
    """

    def __init__(self, index_dir='/home/lxy/DPR/9m4d_bge_2wft_34w_norm',
                 model_name='/home/lxy/DPR/models/checkpoint-53910-epoch-10',
                 only_retriever=False) -> None:
        """Load the FAISS index at ``index_dir`` using the encoder at ``model_name``.

        only_retriever: when True, retrieve() returns the raw texts of the
        matched documents instead of building an augmented prompt.
        """
        self.only_retriever = only_retriever
        self.db = FAISS.load_local(index_dir, Sent2VecEmbeddings(model_name=model_name))
        print(f'成功加载了编码器{model_name}和向量数据库{index_dir}-------------------------------------------------------------------------------------------------------')
        print('==========================================================================================================================================================')

    def retrieve(self, query, k=3, score_threshold=0.4, roles=None, debug=True, get_doc_list=False):
        """Retrieve up to ``k`` unique documents for ``query`` and build output.

        query: the raw query, or a full fastchat conversation when ``roles``
            is given.
        k: maximum number of unique documents to keep.
        score_threshold: similarity cutoff forwarded to the vector store.
        roles: ``[role1, role2]`` markers of a fastchat conversation; the real
            question is the text between the last ``role1`` and last ``role2``.
        debug: print the extracted question, kept docs and filter statistics.
        get_doc_list: return the (reordered) document texts instead of a prompt.

        Returns a list of document texts (``only_retriever``/``get_doc_list``),
        an augmented prompt string, or the original ``query`` unchanged when
        nothing was retrieved.
        """
        if roles is not None:
            # fastchat conversation: slice the user's latest utterance out of
            # the transcript, between the last role1 marker and the last role2.
            real_question = query[query.rfind(roles[0]) + len(roles[0]):query.rfind(roles[1])]
        else:
            real_question = query
        if debug:
            print(f'获取到的问题：{real_question}\n')
        # Over-fetch 2*k candidates so that up to k unique documents survive
        # the dedup pass below.
        docs = self.db.similarity_search(real_question, 2 * k, score_threshold=score_threshold)
        # Deduplicate by page_content, preserving rank order, capped at k.
        seen_contents = set()
        valid_doc = []
        for doc in docs:
            if len(valid_doc) >= k:
                break
            if doc.page_content not in seen_contents:
                seen_contents.add(doc.page_content)
                valid_doc.append(doc)

        if debug:
            print('valid_doc', valid_doc)

        reordering_docs = valid_doc
        if not self.only_retriever:
            # Mitigate "lost in the middle": place the most relevant docs at
            # the edges of the context window.
            reordering = LongContextReorder()
            reordering_docs = reordering.transform_documents(valid_doc)
        if debug:
            # NOTE(review): the second count also includes docs dropped by the
            # >= k truncation above, not only exact duplicates.
            print(f'过滤掉了{2*k-len(docs)}个距离超出{score_threshold}检索到的文档，经过去重又过滤了{len(docs)-len(valid_doc)}个\n')
        if self.only_retriever:
            return [doc.page_content for doc in valid_doc]

        if len(reordering_docs):
            if get_doc_list:
                return [doc.page_content for doc in reordering_docs]

            # Build a numbered knowledge section: "1.<doc>\n2.<doc>\n..."
            prompt = "背景：\n"
            for i, d in enumerate(reordering_docs):
                prompt += f'{i+1}.' + d.page_content + '\n'

            if roles is None:
                prompt = prompt + "\n请联系上下文回答问题: " + real_question
                return prompt
            else:
                # fastchat: sys prompt + knowledge prompt + role1 + real_q + role2
                role1_pos = query.rfind(roles[0])
                return (query[:role1_pos]
                        + prompt
                        + query[role1_pos:role1_pos + len(roles[0])]
                        + real_question
                        + query[query.rfind(roles[1]):])
        else:
            # Nothing retrieved above the threshold: fall back to the raw query.
            return query
        

# --- Earlier experiment configurations, kept commented out for reference ---
# index_dir='/data/lxy/RAT/10m5d_wxb_bge1.5_hnswivf_maxNorm/'
# model_name='BAAI/bge-large-zh-v1.5'
# r=retriver(index_dir,model_name)
# print(r.retrieve("我想办理在校生四六级证明"))
# db=FAISS.load_local('/home/lxy/DPR/qg_bge_2wfinetuned_340000',Sent2VecEmbeddings(model_name='/home/lxy/DPR/models/checkpoint-53910-epoch-10'))
# retriever=db.as_retriever(search_type="similarity_score_threshold",
#                 search_kwargs={'score_threshold': 0.3})
# retriever=db.as_retriever()
# print(retriever.get_relevant_documents("我想上东北大学！！"))



# Active configuration: hard-coded local paths to the FAISS index directory
# and the sentence-encoder checkpoint.
# NOTE(review): this runs at import time — merely importing this module loads
# the encoder and the vector index; consider guarding if that is unintended.
index_dir='/home/lxy/gaobaodata/test'
model_name='/data/lxy/thenlper/gte-large-zh'
# Module-level retriever instance; consumed by the __main__ block below.
r=retriver(index_dir,model_name,only_retriever=True)

# Interactive REPL loop, kept commented out for manual testing.
# while True:
#     inputs=str(input())
#     print(r.retrieve(inputs,score_threshold=0.8,debug=False))

# db=FAISS.load_local('/home/lxy/DPR/qg_bge_2wfinetuned_340000',Sent2VecEmbeddings(model_name=model_name))

if __name__ == '__main__':
    # Evaluate retrieval-as-classification against a labelled spreadsheet:
    # each row holds a question ('问题') and its expected category
    # ('期待的分类'; NaN is normalised to the string 'None', meaning "no
    # category should be retrieved"). For each candidate threshold, count
    # misclassifications and keep the best-performing threshold.
    import pandas as pd

    df = pd.read_excel('/home/lxy/multiR/分类测试1.xlsx')
    df = df.fillna('None')
    questions = df.loc[:, '问题']
    expected_labels = df.loc[:, '期待的分类']

    # NOTE(review): each value is divided by 10 inside the loop, so 0.7 is
    # actually evaluated as threshold 0.07 — confirm this rescale is intended.
    possible_thr = [0.7]
    best_thr = 0
    current_wrong = len(questions)  # worst case: everything wrong
    for thr in possible_thr:
        thr = thr / 10
        wrong_cnt = 0
        for i, question in enumerate(questions):
            # The text of the top-1 retrieved document is the predicted label.
            fake_answer = r.retrieve(question, score_threshold=thr, debug=False)
            if len(fake_answer) == 0:
                # No doc retrieved: only wrong if a category was expected.
                if expected_labels[i] != 'None':
                    wrong_cnt += 1
                    print(question, f'输出答案None', f'期待答案{expected_labels[i]}')
            elif fake_answer[0] != expected_labels[i]:
                wrong_cnt += 1
                print(question, f'输出答案{fake_answer[0]}', f'期待答案{expected_labels[i]}')
        # <= so that, on ties, the later (larger) threshold wins.
        if wrong_cnt <= current_wrong:
            best_thr = thr
            current_wrong = wrong_cnt
    print(f'{current_wrong}/{len(questions)}', best_thr)