from langchain_community.retrievers import BM25Retriever
from typing import List
import jieba
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain_huggingface import HuggingFaceEmbeddings
from rank_bm25 import BM25Okapi
from langchain_openai import ChatOpenAI


# Load the raw medical corpus and cut it into retrieval-sized chunks.
loader = TextLoader('medical_data.txt')
documents = loader.load()

# Split strictly on newlines; each chunk is at most 500 characters,
# with no overlap between consecutive chunks.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=0,
    length_function=len,
    separators=['\n'],
)
docs = splitter.split_documents(documents)

# Sanity check: show the first chunk.
print(docs[0])

def preprocessing_func(text: str) -> List[str]:
    """Tokenize Chinese text into a list of words with jieba."""
    # jieba.lcut is the list-returning equivalent of list(jieba.cut(...)).
    return jieba.lcut(text)
# Build the BM25 retriever via the from_documents classmethod, which
# constructs the underlying BM25 vectorizer for us. k must be passed
# HERE: the original code set k=10 on a throwaway instance and then
# built the real retriever without it, silently reverting to k=4.
retriever = BM25Retriever.from_documents(
    docs,
    k=10,
    preprocess_func=preprocessing_func,
)
print(retriever.k)

print(retriever.invoke('骨折了应该怎么办'))


# --- Lexical (BM25) retrieval over the raw chunk texts ---
texts = [doc.page_content for doc in docs]
texts_processed = [preprocessing_func(t) for t in texts]
vectorizer = BM25Okapi(texts_processed)

query = '骨折了应该怎么办'
# Run the BM25 top-10 query ONCE and reuse the result; the original
# code recomputed the identical query three times (once discarding
# the result entirely).
bm25_res = vectorizer.get_top_n(preprocessing_func(query), texts, n=10)
print(bm25_res)

# --- Dense retrieval: bge embeddings + FAISS index ---
embeddings = HuggingFaceEmbeddings(
    model_name='/media/songzhijun/AI1/LLM/BAAI/bge-large-zh-v1.5',
    model_kwargs={'device': 'cuda:0'},
)
db = FAISS.from_documents(docs, embeddings)

# db.save_local('your save path')

vector_res = db.similarity_search(query, k=10)
print(vector_res)


def rrf(vector_results: List[str], text_results: List[str], k: int = 10, m: int = 60):
    """
    Fuse two ranked retrieval lists with Reciprocal Rank Fusion (RRF).

    params:
    vector_results (list): ranked results from dense/vector retrieval
    text_results (list): ranked results from lexical/text retrieval
    k (int): number of fused results to return
    m (int): RRF smoothing constant (dampens the weight of top ranks)

    return:
    top-k document identifiers ordered by descending fused score
    """
    fused = {}

    # Each list contributes 1 / (rank + m) for every document it ranks;
    # documents appearing in both lists accumulate both contributions.
    for ranking in (vector_results, text_results):
        for rank, doc_id in enumerate(ranking):
            fused[doc_id] = fused.get(doc_id, 0) + 1 / (rank + m)

    # Highest fused score first (stable for ties), then truncate to k.
    ordered = sorted(fused, key=fused.get, reverse=True)
    return ordered[:k]

# Fuse the two candidate lists: dense results contribute their page
# content, BM25 results are already plain text.
vector_results = [doc.page_content for doc in vector_res]
text_results = list(bm25_res)
rrf_res = rrf(vector_results, text_results)
print(rrf_res)


# RAG prompt template; first slot = user question, second slot = the
# retrieved documents. The Chinese instructions tell the model to answer
# strictly from the retrieved documents and to reply "I don't know"
# when the documents do not contain the answer.
prompt = '''
任务目标：根据检索出的文档回答用户问题
任务要求：
    1、不得脱离检索出的文档回答问题
    2、若检索出的文档不包含用户问题的答案，请回答我不知道

用户问题：
{}

检索出的文档：
{}
'''


# Local OpenAI-compatible endpoint (e.g. vLLM); the api_key is a dummy
# value the client requires but the local server ignores.
model = ChatOpenAI(model='Qwen2-7B-Instruct', base_url='http://localhost:8000/v1', api_key='n')

# RAG answer: join the fused chunks with newlines so the documents stay
# separated inside the prompt — ''.join ran them together into one
# unreadable blob.
res = model.invoke(prompt.format('骨折了应该怎么办', '\n'.join(rrf_res)))
print(res.content)

# Baseline for comparison: the same question without retrieved context.
res = model.invoke('骨折了应该怎么办')
print(res.content)