import ast
import json

import jieba
import torch
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.retrievers import EnsembleRetriever
from langchain.vectorstores import FAISS
from langchain_community.retrievers import BM25Retriever
from pydantic import Field
from rank_bm25 import BM25Okapi
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Local model paths shared by the retrieval / rerank helpers below.
_EMBED_MODEL_PATH = "/home/lonely_bullet/work_place/models/AI-ModelScope/bge-large-zh-v1___5"
_RERANK_MODEL_PATH = "/home/lonely_bullet/work_place/models/AI-ModelScope/bge-reranker-v2-m3"


def rag_func(str_):
    """Hybrid RAG retrieval tool: dense (FAISS) + sparse (BM25) search, reranked.

    Parameters
    ----------
    str_ : dict | str
        Either a dict, or a string holding a (possibly single-quoted)
        JSON / Python-literal dict, with keys:
          - ``query``:    search query text
          - ``rag_path``: path of a previously saved FAISS index
          - ``chunks``:   raw text chunks backing the index (used for BM25)

    Returns
    -------
    list[tuple[str, float]]
        ``(chunk_text, rerank_score)`` pairs sorted by score, best first.
        On bad input a Chinese error-message string is returned instead,
        matching the tool-calling convention of the surrounding agent.
    """
    params = _parse_params(str_)
    if params is None:
        return f"无法解析输入参数: {str_}"

    query = params.get('query')
    rag_path = params.get('rag_path')
    chunks = params.get('chunks')
    # Truthiness check also catches missing keys (None), which previously
    # slipped past the `== ''` / `== []` comparisons and crashed later in
    # FAISS.load_local.
    if not rag_path or not chunks:
        return '缺少参数向量数据库路径或向量数据库对应的文本内容，你可以尝试访问工具get_rag_db'

    docs = _retrieve(query, rag_path, chunks)
    return _rerank(query, docs)


def _parse_params(str_):
    """Best-effort parse of tool input into a dict; return None on failure."""
    if not isinstance(str_, str):
        return str_
    # LLM tool calls often arrive single-quoted; normalize to JSON quotes.
    normalized = str_.replace("'", '"')
    try:
        return json.loads(normalized)
    except json.JSONDecodeError:
        pass
    # Strip one layer of surrounding quotes and retry.
    cleaned = normalized.strip()
    for quote in ('"', "'"):
        if cleaned.startswith(quote) and cleaned.endswith(quote):
            cleaned = cleaned[1:-1]
    try:
        return json.loads(cleaned)
    except json.JSONDecodeError:
        try:
            # literal_eval only evaluates Python literals: unlike the
            # previous eval(), it cannot execute arbitrary code coming
            # from untrusted model output.
            return ast.literal_eval(cleaned)
        except (ValueError, SyntaxError):
            return None


def _retrieve(query, rag_path, chunks, k=3):
    """Run a 0.7/0.3-weighted ensemble of FAISS and BM25 retrieval (top-k each)."""
    embeddings = HuggingFaceEmbeddings(model_name=_EMBED_MODEL_PATH)
    vs = FAISS.load_local(rag_path, embeddings, allow_dangerous_deserialization=True)
    faiss_retriever = vs.as_retriever(search_kwargs={"k": k})

    # BM25 over jieba-segmented chunks (Chinese word segmentation).
    tokenized = [list(jieba.cut(chunk)) for chunk in chunks]
    bm25_retriever = BM25Retriever.from_texts(chunks)
    bm25_retriever.k = k
    bm25_retriever.vectorizer = BM25Okapi(tokenized)

    ensemble = EnsembleRetriever(
        retrievers=[faiss_retriever, bm25_retriever],
        weights=[0.7, 0.3],
    )
    return ensemble.invoke(query)


def _rerank(query, docs):
    """Score (query, doc) pairs with a cross-encoder and sort best-first."""
    if not docs:
        # Tokenizing an empty batch would raise; an empty result is valid.
        return []

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = AutoTokenizer.from_pretrained(_RERANK_MODEL_PATH)
    model = AutoModelForSequenceClassification.from_pretrained(_RERANK_MODEL_PATH).to(device)
    model.eval()

    texts = [doc.page_content for doc in docs]
    pairs = [[query, text] for text in texts]
    with torch.no_grad():
        inputs = tokenizer(
            pairs, padding=True, truncation=True, max_length=512, return_tensors='pt'
        ).to(device)
        # .tolist() yields plain Python floats so the returned scores are
        # serializable (previously they were 0-d torch tensors).
        scores = model(**inputs, return_dict=True).logits.view(-1).float().tolist()

    return sorted(zip(texts, scores), key=lambda x: x[1], reverse=True)