import datetime  # kept: referenced by the original for time-warning handling
import json
import os
import uuid
from typing import List

import dotenv
import weaviate
from langchain.load import dumps
from langchain.retrievers import MultiQueryRetriever
from langchain_community.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI
from langchain_weaviate import WeaviateVectorStore
from weaviate.auth import AuthApiKey

dotenv.load_dotenv()


# 修复UUID序列化问题的函数
def safe_loads(doc_str):
    """
    安全地加载序列化的文档，处理UUID对象等特殊情况
    解决：NotImplementedError: Trying to load an object that doesn't implement serialization
    """
    try:
        # 尝试使用json.loads直接解析
        doc_dict = json.loads(doc_str)

        # 递归处理文档中可能包含的UUID字符串，将其转换回UUID对象
        def handle_possible_uuid(obj):
            if isinstance(obj, dict):
                # 检查是否是LangChain的UUID表示形式
                if 'lc' in obj and obj['lc'] == 1 and 'type' in obj and obj[
                    'type'] == 'not_implemented' and 'id' in obj and obj['id'] == ['uuid', 'UUID'] and 'repr' in obj:
                    # 从repr中提取UUID字符串
                    uuid_str = obj['repr'].split("'")[1]
                    return uuid.UUID(uuid_str)
                # 处理其他字典
                return {k: handle_possible_uuid(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [handle_possible_uuid(item) for item in obj]
            else:
                return obj

        # 处理文档字典
        processed_doc = handle_possible_uuid(doc_dict)

        # 重新构建Document对象
        return Document(
            page_content=processed_doc.get('page_content', ''),
            metadata=processed_doc.get('metadata', {})
        )
    except Exception as e:
        # 如果解析失败，尝试使用更简单的方式
        print(f"Warning: Failed to parse document with json.loads: {e}")
        # 这里可以添加更多的回退策略
        # 对于这个修复，我们至少确保函数不会崩溃
        return Document(page_content="", metadata={})


class RAGFusionRetriever(MultiQueryRetriever):
    """RAG多查询结果融合策略检索器"""
    k: int = 4

    def retrieve_documents(
            self,
            queries: List[str],
            run_manager: CallbackManagerForRetrieverRun,
    ) -> List[List]:
        """重写检索文档函数，返回值变成一个嵌套的列表"""
        documents = []
        for query in queries:
            docs = self.retriever.invoke(
                query, config={"callbacks": run_manager.get_child()}
            )
            documents.append(docs)  # 修改这里，将每个查询的结果作为单独的列表添加
        return documents

    def unique_union(self, documents: List[List]) -> List[Document]:
        """使用RRF算法来去重合并对应的文档，参数为嵌套列表，返回值为文档列表"""
        # 1.定义一个变量存储每个文档的得分信息
        fused_result = {}

        # 2. 循环两层获取每一个文档信息
        for docs in documents:
            for rank, doc in enumerate(docs):
                # 3.使用dump函数将类转换为字符串
                doc_str = dumps(doc)
                # 4. 判断下该文档的字符串是否已经计算过得分
                if doc_str not in fused_result:
                    fused_result[doc_str] = 0
                # 5. 计算新的分
                fused_result[doc_str] += 1 / (rank + 60)
        # 6. 执行排序操作，获取相应的数据，使用的是降序
        reranked_results = []
        for doc, score in sorted(fused_result.items(), key=lambda x: x[1], reverse=True):
            try:
                # 使用我们自定义的safe_loads函数替代原始的loads
                loaded_doc = safe_loads(doc)
                reranked_results.append((loaded_doc, score))
            except Exception as e:
                print(f"Warning: Failed to load document: {e}")
                # 继续处理其他文档，避免整个过程崩溃
                continue

        # 只返回文档部分，不包含分数
        return [item[0] for item in reranked_results[:self.k]]


# 1. 构建向量数据库与检索器
# 创建客户端连接（使用新的connect_to_weaviate_cloud方法）
client = weaviate.connect_to_weaviate_cloud(
    cluster_url="https://zabwh0mbt4errmvpknamq.c0.asia-southeast1.gcp.weaviate.cloud",
    auth_credentials=AuthApiKey("b2o4OGQxcmptMTZEWmJ5VV9udE5xSXBzQW04dUlDZ0JSS0d1ay9FQlhXdEtyMDR4OUFVNzc0eG9mU3dnPV92MjAw")
)

db = WeaviateVectorStore(
    client=client,
    index_name="myleane",
    text_key="text",
    embedding=QianfanEmbeddingsEndpoint(),
)

retriever = db.as_retriever(search_type="mmr")

# 2. 创建所检查检索器
rag_fusion_retriever = RAGFusionRetriever.from_llm(
    retriever=retriever,
    # 2.创建大语言模型
    llm=ChatOpenAI(model_name="kimi-k2-0711-preview", temperature=0),
)

# 3. 执行检索
try:
    docs = rag_fusion_retriever.invoke("关于LLMOps应用配置的文档有哪些")
    print("检索成功!")
    print(f"文档数量: {len(docs)}")
    for i, doc in enumerate(docs):
        print(f"文档 {i+1}:")
        print(f"  内容: {doc.page_content[:100]}...")  # 只打印前100个字符
        print(f"  元数据: {doc.metadata}")
        print()
except Exception as e:
    print(f"检索过程中出错: {e}")
finally:
    # 确保客户端关闭，避免资源泄漏警告
    if 'client' in locals() and hasattr(client, 'close'):
        client.close()
        print("Weaviate客户端已关闭")