import jieba
from rank_bm25 import BM25Okapi
from src.common.logger import getLogger
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Module-level logger shared by this module (project-local factory from src.common.logger).
logger = getLogger()

class BM25RAG:
    """BM25-based retrieval-augmented generation over a vector-store collection.

    Documents are loaded from a Qdrant-style collection (``collection_prefix +
    library_number``), ranked with BM25 (jieba-tokenized, so Chinese text is
    handled) against the user query, and the top-k hits are supplied as context
    to an LLM chain that answers the question.
    """

    def __init__(self, llm_model, vector_store, collection_prefix, library_number, top_k):
        # llm_model: LangChain-compatible chat model, piped in invoke()
        # vector_store: object exposing .client.scroll() (Qdrant-style API)
        # collection_prefix + library_number: concatenated into the collection name
        # top_k: number of top-scoring documents kept as context
        self.llm_model = llm_model
        self.vector_store = vector_store
        self.collection_prefix = collection_prefix
        self.library_number = library_number
        self.top_k = top_k

    def load_documents(self):
        """Return the ``page_content`` payload of every record in the collection.

        Pages through the collection with repeated ``scroll()`` calls instead of
        a single huge ``limit=999999`` request, so collections of any size are
        fully loaded rather than silently truncated.
        """
        logger.info("BM25RAG load_documents start")
        collection_name = self.collection_prefix + self.library_number
        doc_records = []
        offset = None
        while True:
            batch, offset = self.vector_store.client.scroll(
                collection_name = collection_name, limit = 1000, offset = offset
            )
            doc_records.extend(batch)
            # scroll() returns None as the next offset once the collection is exhausted
            if offset is None or not batch:
                break
        logger.info(f"BM25RAG load_documents doc_records len: {len(doc_records)}")
        # Missing page_content falls back to "" so downstream tokenization never sees None.
        return [doc.payload.get("page_content", "") for doc in doc_records]

    def create_bm25(self, documents):
        """Tokenize *documents* with jieba and build a BM25Okapi index over them."""
        # Fixed log label: original said "search_corpus", copy/pasted from elsewhere.
        logger.info("BM25RAG create_bm25 start")
        tokenizer_corpus = [list(jieba.cut(doc)) for doc in documents]
        return BM25Okapi(tokenizer_corpus)

    def search_bm25(self, documents, query):
        """Return the ``top_k`` BM25-ranked documents for *query*, newline-joined.

        Returns ``""`` for an empty corpus — BM25Okapi raises ZeroDivisionError
        when constructed over zero documents.
        """
        logger.info("BM25RAG search_bm25 start")
        if not documents:
            logger.info("BM25RAG search_bm25: empty corpus, returning no context")
            return ""
        tokenizer_query = list(jieba.cut(query))
        scores = self.create_bm25(documents).get_scores(tokenizer_query)

        # Indices of the top_k highest-scoring documents (slice is safe if
        # top_k exceeds the corpus size).
        indices = sorted(range(len(scores)), key = lambda i: scores[i], reverse = True)[:self.top_k]
        logger.info(f"BM25RAG search_bm25 indices: {indices}")
        return "\n".join(documents[i] for i in indices)

    def invoke(self, query):
        """Answer *query* using BM25-retrieved context and the configured LLM.

        Returns a dict with the retrieved context ("retrieve_docs") and the
        model's answer ("chain_result").
        """
        logger.info("BM25RAG invoke start")
        documents = self.load_documents()
        retrieve_docs = self.search_bm25(documents, query)
        logger.info(f"BM25RAG invoke retrieve_docs len: {len(retrieve_docs)}")

        template = """
            你是一位问题解答专家，请根据提供的上下文回答用户问题。
            
            上下文：{context}
            用户问题：{question}
            
            请必须用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({ "context": retrieve_docs, "question": query })
        logger.info(f"BM25RAG invoke response len: {len(response)}")
        return { "retrieve_docs": retrieve_docs, "chain_result": response }
