import os

from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams, Distance

# Vector dimensionality for the Qdrant collection — must match the output
# size of the configured embedding model (DashScope text_embedding_v1 here;
# assumed to emit 1536-dim vectors — TODO confirm against model docs).
EMBEDDING_DIM = 1536
# Name of the Qdrant collection holding this demo's document vectors.
COLLECTION_NAME = "full_demo"
# On-disk path for the embedded (local-mode) Qdrant database.
PATH = "./qdrant_db"

# Local-mode Qdrant client: persists to PATH, no server process required.
client = QdrantClient(path=PATH)

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, get_response_synthesizer
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.response_synthesizers import ResponseMode
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import Settings
from llama_index.core import StorageContext
from llama_index.core.postprocessor import LLMRerank, SimilarityPostprocessor
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
from llama_index.embeddings.dashscope import DashScopeEmbedding, DashScopeTextEmbeddingModels

# 1. Configure the global LLM and embedding model (DashScope / Qwen).
Settings.llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX, api_key=os.getenv("DASHSCOPE_API_KEY"))
Settings.embed_model = DashScopeEmbedding(model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V1)

# 2. Global ingestion transformations: smaller chunks to improve match quality.
Settings.transformations = [SentenceSplitter(chunk_size=256, chunk_overlap=64)]

# 3. Load local documents from ./data and warn when the directory is empty,
# since an empty corpus makes every downstream answer empty.
documents = SimpleDirectoryReader("./data").load_data()
print(f"[RAG] 已从 ./data 加载文档数量: {len(documents)}")
if not documents:
    print("[RAG][警告] ./data 未找到任何文档，可能导致回答为空。")


if client.collection_exists(collection_name=COLLECTION_NAME):
    client.delete_collection(collection_name=COLLECTION_NAME)

# 4. 创建 collection
client.create_collection(
    collection_name=COLLECTION_NAME,
    vectors_config=VectorParams(size=EMBEDDING_DIM, distance=Distance.COSINE)
)

# 5. 创建 Vector Store
vector_store = QdrantVectorStore(client=client, collection_name=COLLECTION_NAME)

# 6. 指定 Vector Store 的 Storage 用于 index
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)

# 7. Post-retrieval reranker: asks the LLM to rerank hits and keep the top 3.
reranker = LLMRerank(top_n=3)
# Similarity filtering is disabled for now, to first make sure retrieval
# returns any content at all.
# sp = SimilarityPostprocessor(similarity_cutoff=0.3)

# 8. 创建自定义检索器，过滤图片节点
class TextOnlyRetriever:
    """Wrap a retriever and drop image-node hits from its results.

    Hits that carry no ``.node`` attribute are passed through unchanged.
    """

    def __init__(self, base_retriever):
        self.base_retriever = base_retriever

    def retrieve(self, query_str):
        """Return the base retriever's hits with ImageNode results removed."""

        def _is_text(hit):
            # Keep the hit unless its inner node's type name marks an image.
            inner = getattr(hit, "node", None)
            return inner is None or "ImageNode" not in type(inner).__name__

        hits = self.base_retriever.retrieve(query_str)
        return [hit for hit in hits if _is_text(hit)]

# RAG-Fusion retriever: generates LLM query variants and fuses the
# per-variant retrieval results from the image-filtered base retriever.
base_retriever = TextOnlyRetriever(index.as_retriever())
fusion_retriever = QueryFusionRetriever(
    [base_retriever],
    similarity_top_k=10,  # raise recall: keep more candidates per query
    num_queries=6,  # number of query variants to generate
    use_async=False,
    # query_gen_prompt="",  # optionally customize the query-generation prompt
)

# 9. Single-turn query engine: fusion retrieval -> LLM rerank -> synthesis.
query_engine = RetrieverQueryEngine.from_args(
    fusion_retriever,
    node_postprocessors=[reranker],  # similarity filtering removed for now
    response_synthesizer=get_response_synthesizer(
        # COMPACT synthesis: pack chunks into as few LLM calls as possible;
        # a common and robust default.
        response_mode=ResponseMode.COMPACT
    )
)

# 10. Multi-turn chat engine: condenses chat history plus the new message
# into a standalone question, then runs the single-turn query engine.
chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=query_engine,
    # condense_question_prompt="" # optionally customize the condense prompt
)

# Manual multi-turn smoke test; example inputs:
# User: deepseek v3有多少参数
# User: 每次激活多少

# Interactive REPL: an empty line exits. Each turn first dumps retrieval
# debug output, then answers via the chat engine (falling back to the
# single-turn query engine when the chat response is empty).
while True:
    question = input("用户：")
    if question.strip() == "":
        break
    try:
        # Debug: dump the retrieved fragments before generating the answer.
        try:
            nodes = fusion_retriever.retrieve(question)
            if not nodes:
                print("[RAG] 未检索到任何文档片段。请检查相似度阈值/索引内容。")
            else:
                # Filter out image nodes, keeping only text nodes.
                # NOTE(review): this re-filters what TextOnlyRetriever already
                # removed, and additionally requires "TextNode" in the type
                # name — hits with other node types are dropped here too.
                text_nodes = []
                for n in nodes:
                    if hasattr(n, "node"):
                        node_type = type(n.node).__name__
                        if "ImageNode" not in node_type and "TextNode" in node_type:
                            text_nodes.append(n)
                        else:
                            print(f"[RAG][过滤] 跳过 {node_type} 节点")
                    else:
                        text_nodes.append(n)

                print(f"[RAG] 检索到 {len(nodes)} 个节点，过滤后 {len(text_nodes)} 个文本节点（显示前5条）：")
                for i, n in enumerate(text_nodes[:5]):
                    score = getattr(n, "score", None)
                    # Content access compatible with multiple llama_index versions.
                    snippet = None
                    node_id = None
                    if hasattr(n, "get_text"):
                        snippet = n.get_text()
                    elif hasattr(n, "node") and hasattr(n.node, "get_content"):
                        snippet = n.node.get_content()
                        node_id = getattr(n.node, "node_id", None)
                    elif hasattr(n, "text"):
                        snippet = n.text
                    elif hasattr(n, "node") and hasattr(n.node, "text"):
                        snippet = n.node.text
                        node_id = getattr(n.node, "node_id", None)
                    else:
                        snippet = str(n)
                    # If the fragment is empty, try reading it straight from the docstore.
                    if (not snippet or snippet.strip() == "") and hasattr(index, "docstore"):
                        try:
                            if node_id is None and hasattr(n, "node") and hasattr(n.node, "node_id"):
                                node_id = n.node.node_id
                            if node_id:
                                print(f"[RAG][调试] 尝试从 docstore 获取节点 {node_id}")
                                full_node = index.docstore.get_node(node_id)
                                print(f"[RAG][调试] 获取到节点类型: {type(full_node)}")
                                if hasattr(full_node, "text"):
                                    snippet = full_node.text
                                    print(f"[RAG][调试] 从 text 属性获取: {snippet[:50]}...")
                                elif hasattr(full_node, "get_content"):
                                    snippet = full_node.get_content()
                                    print(f"[RAG][调试] 从 get_content 获取: {snippet[:50]}...")
                                else:
                                    snippet = str(full_node)
                                    print(f"[RAG][调试] 从 str 获取: {snippet[:50]}...")
                        except Exception as de:
                            print(f"[RAG][调试] 通过 docstore 获取节点内容失败: {de}")

                    # Still empty: fall back to the raw node attached to the hit.
                    if not snippet or snippet.strip() == "":
                        print(f"[RAG][调试] 节点 {node_id} 内容仍为空，尝试其他方法...")
                        try:
                            # Read text directly off the retrieval hit's raw node.
                            if hasattr(n, "node"):
                                raw_node = n.node
                                if hasattr(raw_node, "text"):
                                    snippet = raw_node.text
                                elif hasattr(raw_node, "get_content"):
                                    snippet = raw_node.get_content()
                                print(f"[RAG][调试] 从原始节点获取: {snippet[:50]}...")
                        except Exception as e:
                            print(f"[RAG][调试] 从原始节点获取失败: {e}")

                    snippet = (snippet or "").replace("\n", " ")[:200]  # longer preview length
                    if score is None:
                        print(f"[RAG] 片段#{i+1} node_id={node_id}: {snippet}")
                    else:
                        print(f"[RAG] 片段#{i+1} 相似度={score:.3f} node_id={node_id}: {snippet}")

                # Extra debug: sample the raw document contents.
                print(f"[RAG] 调试：检查原始文档内容...")
                for i, doc in enumerate(documents[:2]):  # only the first 2 documents
                    doc_text = getattr(doc, "text", str(doc))[:100]
                    print(f"[RAG] 原始文档#{i+1}: {doc_text}...")
        # NOTE(review): the name `re` shadows the stdlib `re` module name
        # inside this handler (the module is not imported here, but the
        # name choice is confusing).
        except Exception as re:
            print(f"[RAG][检索调试] 无法打印检索片段: {re}")

        response = chat_engine.chat(question)
        text = getattr(response, "response", None)
        if not text:
            text = str(response)
        if not text:
            # Fallback path: query the single-turn engine directly.
            try:
                direct = query_engine.query(question)
                text = getattr(direct, "response", None) or str(direct) or "（空响应）"
            except Exception as qe:
                print(f"[RAG][错误] 备用 query 引擎也失败: {qe}")
                text = "（空响应）"
        print(f"AI：{text}")
    except Exception as e:
        print(f"[RAG][错误] 生成回答失败: {e}")
