import jieba
import numpy as np
from rank_bm25 import BM25Okapi
from src.common.logger import getLogger
from sklearn.preprocessing import MinMaxScaler
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

logger = getLogger()

class HybridRAG:
    """Hybrid retrieval-augmented generation.

    Combines dense vector search with BM25 lexical search over the same
    collection, fuses the two rankings (weighted min-max score fusion or
    Reciprocal Rank Fusion), and answers the query with an LLM chain over
    the fused top-k documents.
    """

    def __init__(self, llm_model, vector_store, collection_prefix, library_number, top_k):
        self.llm_model = llm_model
        self.vector_store = vector_store
        self.collection_prefix = collection_prefix
        self.library_number = library_number
        self.collection_name = self.collection_prefix + self.library_number
        # Fusion parameters: equal dense/BM25 weighting by default.
        self.dense_weight = 0.5
        self.bm25_weight = 0.5
        # RRF smoothing constant k (60 is the standard value from the RRF literature).
        self.rank = 60
        self.top_k = top_k
        # "weight" -> weighted score fusion; any other value -> RRF.
        self.combine_mode = "weight"

    def retrieve_dense(self, query):
        """Dense retrieval: return [(doc_id, score)] for the top_k vector hits.

        NOTE(review): depending on the vector store, similarity_search_with_score
        may return a *distance* (lower = better) rather than a similarity —
        confirm the score direction before weighted fusion with BM25 scores.
        """
        logger.info("HybridRAG retrieve_dense start")
        retrieve_docs = self.vector_store.similarity_search_with_score(query, k=self.top_k)
        return [(doc.metadata.get("doc_id"), score) for doc, score in retrieve_docs]

    def retrieve_bm25(self, query):
        """BM25 lexical retrieval over the full collection.

        Returns:
            (score_ids, documents): score_ids is [(doc_id, bm25_score)] for the
            top_k documents; documents maps every doc_id -> page_content so the
            caller can resolve fused ids back to text.
        """
        logger.info("HybridRAG retrieve_bm25 start")
        # Reuse the collection name computed once in __init__ (was duplicated here).
        doc_records, _ = self.vector_store.client.scroll(
            collection_name=self.collection_name, limit=999999, offset=None
        )
        logger.info(f"HybridRAG retrieve_bm25 doc_records len: {len(doc_records)}")

        texts, doc_ids, documents = [], [], {}
        for doc in doc_records:
            # `metadata` may be missing; the previous code indexed into a ""
            # default, raising TypeError. Skip records without a doc_id instead.
            metadata = doc.payload.get("metadata") or {}
            doc_id = metadata.get("doc_id")
            if doc_id is None:
                continue
            content = doc.payload.get("page_content", "")
            texts.append(content)
            doc_ids.append(doc_id)
            documents[doc_id] = content

        if not texts:
            # BM25Okapi raises ZeroDivisionError on an empty corpus.
            logger.info("HybridRAG retrieve_bm25 empty corpus")
            return [], {}

        tokenizer_corpus = [list(jieba.cut(text)) for text in texts]
        bm25 = BM25Okapi(tokenizer_corpus)

        tokenizer_query = list(jieba.cut(query))
        scores = bm25.get_scores(tokenizer_query)
        logger.info(f"HybridRAG retrieve_bm25 scores: {scores}")

        sorted_docs = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)[:self.top_k]
        logger.info(f"HybridRAG retrieve_bm25 sorted_docs: {sorted_docs}")

        # sorted_docs holds (corpus_index, score) pairs; map indices back to ids.
        score_ids = [(doc_ids[idx], score) for idx, score in sorted_docs]
        logger.info(f"HybridRAG retrieve_bm25 score_ids: {score_ids}")
        return score_ids, documents

    def retrieve_combine(self, query):
        """Run both retrievers, fuse their rankings, and return document texts."""
        logger.info("HybridRAG retrieve_combine start")
        dense_scores = self.retrieve_dense(query)
        bm25_scores, documents = self.retrieve_bm25(query)
        logger.info(f"HybridRAG retrieve_combine dense_scores: {dense_scores}")
        logger.info(f"HybridRAG retrieve_combine bm25_scores: {bm25_scores}")
        if self.combine_mode == "weight":
            doc_ids = self.combine_weight(dense_scores, bm25_scores)
        else:
            doc_ids = self.combine_rrf(dense_scores, bm25_scores)

        # Fused ids not present in the BM25 snapshot have no text and are dropped.
        return [documents[doc_id] for doc_id in doc_ids if doc_id in documents]

    def combine_weight(self, dense_tuples, bm25_tuples):
        """Weighted min-max score fusion of the two rankings.

        Returns the top_k doc_ids ordered by the fused score. A document seen
        by only one retriever receives the other retriever's worst observed
        score before normalization.
        """
        logger.info("HybridRAG combine_weight start")
        dense_dict = dict(dense_tuples)
        bm25_dict = dict(bm25_tuples)

        all_doc_ids = set(dense_dict) | set(bm25_dict)
        if not all_doc_ids:
            # Nothing retrieved by either method; min() below would raise.
            return []

        # Guard against one ranking being empty (min() on empty raises ValueError).
        dense_min_score = min(dense_dict.values()) if dense_dict else 0.0
        bm25_min_score = min(bm25_dict.values()) if bm25_dict else 0.0
        logger.info(f"HybridRAG combine_weight dense_min_score: {dense_min_score}")
        logger.info(f"HybridRAG combine_weight bm25_min_score: {bm25_min_score}")

        # Materialize the id order once so both score lists align with `ids`.
        ids = list(all_doc_ids)
        dense_scores = [dense_dict.get(doc_id, dense_min_score) for doc_id in ids]
        bm25_scores = [bm25_dict.get(doc_id, bm25_min_score) for doc_id in ids]

        norm_dense_scores = self.normalize_score(dense_scores)
        norm_bm25_scores = self.normalize_score(bm25_scores)
        logger.info(f"HybridRAG combine_weight norm_dense_scores: {norm_dense_scores}")
        logger.info(f"HybridRAG combine_weight norm_bm25_scores: {norm_bm25_scores}")

        final_scores = self.dense_weight * norm_dense_scores + self.bm25_weight * norm_bm25_scores
        logger.info(f"HybridRAG combine_weight final_scores: {final_scores}")
        combine_scores = sorted(zip(ids, final_scores), key=lambda x: x[1], reverse=True)
        logger.info(f"HybridRAG combine_weight combine_scores: {combine_scores}")
        return [doc_id for doc_id, _ in combine_scores[:self.top_k]]

    def normalize_score(self, scores):
        """Min-max normalize scores to [0, 1]; constant input maps to 0.5."""
        logger.info("HybridRAG normalize start")
        scores = np.asarray(scores, dtype=float)
        if scores.size == 0:
            return scores
        if scores.min() == scores.max():
            # MinMaxScaler would yield all zeros; 0.5 keeps the fusion neutral.
            return np.full_like(scores, 0.5)
        return MinMaxScaler().fit_transform(scores.reshape(-1, 1)).flatten()

    def combine_rrf(self, dense_tuples, bm25_tuples):
        """Reciprocal Rank Fusion: score = sum of 1 / (k + rank) per ranking.

        Returns the top_k doc_ids ordered by fused RRF score; `self.rank` is
        the RRF constant k.
        """
        logger.info("HybridRAG combine_rrf start")
        # Ranks are 1-based positions in each retriever's result order.
        dense_ranks = {doc_id: i + 1 for i, (doc_id, _) in enumerate(dense_tuples)}
        bm25_ranks = {doc_id: i + 1 for i, (doc_id, _) in enumerate(bm25_tuples)}

        rrf_scores = {}
        for doc_id in set(dense_ranks) | set(bm25_ranks):
            score = 0.0
            if doc_id in dense_ranks:
                score += 1.0 / (self.rank + dense_ranks[doc_id])
            if doc_id in bm25_ranks:
                score += 1.0 / (self.rank + bm25_ranks[doc_id])
            rrf_scores[doc_id] = score
        logger.info(f"HybridRAG combine_rrf rrf_scores: {rrf_scores}")

        sorted_docs = sorted(rrf_scores.items(), key=lambda x: x[1], reverse=True)
        logger.info(f"HybridRAG combine_rrf sorted_docs: {sorted_docs}")
        return [doc_id for doc_id, _ in sorted_docs[:self.top_k]]

    def invoke(self, query):
        """Retrieve fused context for `query` and answer it with the LLM chain.

        Returns a dict with the retrieved document texts and the chain's answer.
        """
        logger.info("HybridRAG invoke start")
        retrieve_docs = self.retrieve_combine(query)
        logger.info(f"HybridRAG invoke retrieve_docs len: {len(retrieve_docs)}")

        # Prompt text is user-facing runtime behavior — kept byte-identical.
        template = """
            你是一位问题解答专家，请根据提供的上下文回答用户问题。
            
            上下文：{context}
            用户问题：{question}
            
            请必须用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({"context": retrieve_docs, "question": query})
        logger.info(f"HybridRAG invoke response len: {len(response)}")
        return {"retrieve_docs": retrieve_docs, "chain_result": response}
