import numpy as np
from src.common.logger import getLogger
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Module-level logger obtained from the project's logging helper;
# shared by all MMRRAG methods below.
logger = getLogger()

class MMRRAG:
    """Retrieval-augmented generation with Maximal Marginal Relevance (MMR).

    Pipeline: pull every record from the configured vector-store collection,
    embed the query and all documents, select a relevance/diversity-balanced
    top-k subset with MMR, then answer the query through an LLM chain that
    receives the selected documents as context.
    """

    def __init__(self, llm_model, embed_model, vector_store, collection_prefix, library_number, lambda_param, top_k):
        """Store collaborators and MMR hyper-parameters.

        llm_model:         LangChain-compatible chat model used by invoke().
        embed_model:       model exposing embed_query / embed_documents.
        vector_store:      store whose .client supports scroll(collection_name=...)
                           (Qdrant-style API — TODO confirm against caller).
        collection_prefix: concatenated with library_number to form the
                           collection name.
        library_number:    collection-name suffix (string, see concatenation).
        lambda_param:      MMR trade-off, expected in [0, 1];
                           1.0 = pure relevance, 0.0 = pure diversity.
        top_k:             maximum number of documents to retrieve.
        """
        self.llm_model = llm_model
        self.embed_model = embed_model
        self.vector_store = vector_store
        self.collection_prefix = collection_prefix
        self.library_number = library_number
        self.lambda_param = lambda_param
        self.top_k = top_k

    def build_embed_vector(self, query):
        """Load all documents of the collection and embed them with the query.

        Returns (documents, query_vector, document_vectors) where `documents`
        is a list of page_content strings aligned index-for-index with
        `document_vectors`.
        """
        # Fix: log messages previously said "calculate_document_similarity",
        # which is not this method's name — corrected for debuggability.
        logger.info("MMRRAG build_embed_vector start")
        collection_name = self.collection_prefix + self.library_number
        # NOTE(review): a single scroll with a large limit assumes the
        # collection holds fewer than 999999 records — confirm, otherwise
        # paginate using the returned offset.
        doc_records, _ = self.vector_store.client.scroll(collection_name = collection_name, limit = 999999, offset = None)
        logger.info(f"MMRRAG build_embed_vector doc_records len: {len(doc_records)}")
        documents = [doc.payload.get("page_content", "") for doc in doc_records]
        query_vector = self.embed_model.embed_query(query)
        document_vectors = self.embed_model.embed_documents(documents)
        return documents, query_vector, document_vectors

    def cosine_similarity(self, vector1, vector2):
        """Cosine similarity of two vectors; 0.0 if either has zero norm."""
        vector1 = np.asarray(vector1)
        vector2 = np.asarray(vector2)
        norm_vec1 = np.linalg.norm(vector1)
        norm_vec2 = np.linalg.norm(vector2)
        # Guard against division by zero for degenerate (all-zero) vectors.
        if norm_vec1 == 0 or norm_vec2 == 0:
            return 0.0
        # Cast so callers always receive a plain Python float, not np.float64.
        return float(np.dot(vector1, vector2) / (norm_vec1 * norm_vec2))

    def retrieve_mmr(self, documents, query_vector, document_vectors):
        """Select up to self.top_k documents via Maximal Marginal Relevance.

        MMR(Di) = lambda * Sim1(Di, Q) - (1 - lambda) * max_{Dj in S} Sim2(Di, Dj)

        The single most relevant document seeds the selection; each following
        pick maximizes the MMR score against the already-selected set S.
        Returns the selected documents in selection order.
        """
        logger.info("MMRRAG retrieve_mmr start")
        # Fix: the original crashed on an empty collection because max()
        # over an empty relevance dict raises ValueError.
        if not document_vectors:
            logger.info("MMRRAG retrieve_mmr no documents to rank")
            return []

        # Sim1: pre-compute query relevance once per document.
        # (Typo fixed: "revelence" -> "relevance".)
        candidate_relevance = {
            idx: self.cosine_similarity(query_vector, vector)
            for idx, vector in enumerate(document_vectors)
        }
        candidate_pool = set(candidate_relevance)
        num_results = min(self.top_k, len(document_vectors))
        logger.info(f"MMRRAG retrieve_mmr num_results: {num_results}")

        # Seed the selection with the single most relevant document.
        first_select_id = max(candidate_relevance, key=candidate_relevance.get)
        select_document_ids = [first_select_id]
        candidate_pool.remove(first_select_id)
        logger.info(f"MMRRAG retrieve_mmr first_select_id: {first_select_id}")

        while len(select_document_ids) < num_results and candidate_pool:
            selected_vectors = [document_vectors[doc_id] for doc_id in select_document_ids]
            mmr_scores = {}
            for candidate_id in candidate_pool:
                candidate_vector = document_vectors[candidate_id]
                # Sim1: every candidate_id is keyed, so the original's
                # .get(..., -1.0) fallback was unreachable.
                relevance_score = candidate_relevance[candidate_id]
                # Sim2: maximum similarity to any already-selected document.
                # selected_vectors is never empty here (selection is seeded
                # above), so the original's emptiness checks were dead code.
                max_similarity_selected = max(
                    self.cosine_similarity(candidate_vector, selected_vector)
                    for selected_vector in selected_vectors
                )
                mmr_scores[candidate_id] = (
                    self.lambda_param * relevance_score
                    - (1 - self.lambda_param) * max_similarity_selected
                )

            logger.info(f"MMRRAG retrieve_mmr mmr_scores: {mmr_scores}")
            best_id = max(mmr_scores, key=mmr_scores.get)
            select_document_ids.append(best_id)
            candidate_pool.remove(best_id)

        logger.info(f"MMRRAG retrieve_mmr select_document_ids: {select_document_ids}")
        return [documents[doc_id] for doc_id in select_document_ids]

    def invoke(self, query):
        """Answer `query` with the LLM, grounded on MMR-retrieved documents.

        Returns {"retrieve_docs": <selected documents>,
                 "chain_result": <LLM answer string>}.
        """
        logger.info("MMRRAG invoke start")
        documents, query_vector, document_vectors = self.build_embed_vector(query)
        retrieve_docs = self.retrieve_mmr(documents, query_vector, document_vectors)
        logger.info(f"MMRRAG invoke retrieve_docs len: {len(retrieve_docs)}")

        # The prompt text is user-facing (Chinese) and intentionally unchanged.
        template = """
            你是一位问题解答专家，请根据提供的上下文回答用户问题。
            
            上下文：{context}
            用户问题：{question}
            
            请必须用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({ "context": retrieve_docs, "question": query })
        logger.info(f"MMRRAG invoke response len: {len(response)}")
        return { "retrieve_docs": retrieve_docs, "chain_result": response }
