import asyncio
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.indices.vector_store import VectorIndexRetriever
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, \
    VectorStoreIndex, StorageContext
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel



import os

# --- Global LLM / embedding configuration ----------------------------------
# SECURITY: API keys were previously hard-coded here, which means they are
# committed to version control and must be treated as compromised (revoke
# them). Keys are now read from environment variables; the literal fallbacks
# only keep this demo runnable and should be removed once the env vars are set.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",  # FIXME: revoke & remove
    ),
    # With the `embedding-3` class of models you can also request a specific
    # embedding size, e.g. dimensions=1024.
)
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# DeepSeek chat model used for all downstream synthesis / agent calls.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get(
        "DEEPSEEK_API_KEY",
        "sk-605e60a1301040759a821b6b677556fb",  # FIXME: revoke & remove
    ),
)
Settings.llm = llm


from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import HierarchicalNodeParser
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.schema import IndexNode

# 1. Load and chunk the documents under ./data.
documents = SimpleDirectoryReader("./data").load_data()

# 2. Hierarchical node parser: parent chunks of ~2048 tokens, child chunks
#    of ~512 tokens.
node_parser = HierarchicalNodeParser.from_defaults(
    chunk_sizes=[2048, 512]
)
nodes = node_parser.get_nodes_from_documents(documents)

print(nodes)

# 3. Build the node-id -> node map used by RecursiveRetriever.
#    BUG FIX: the original code separated parents from children by testing
#    `n.ref_doc_id`, but HierarchicalNodeParser attaches a SOURCE-document
#    relationship to *every* node it produces, so ref_doc_id is set on all
#    of them — `base_nodes` was always empty and the index was built over
#    nothing. Root/leaf status is encoded in the PARENT/CHILD relationships,
#    so test `parent_node` instead.
node_dict = {n.node_id: n for n in nodes}
base_nodes = [n for n in nodes if n.parent_node is None]      # root (parent) nodes
sub_nodes = [n for n in nodes if n.parent_node is not None]   # child nodes

# 4. Recursive retriever over a vector index of the root nodes.
#    NOTE(review): RecursiveRetriever only recurses through IndexNode
#    references; plain TextNodes retrieved here are returned as-is — confirm
#    this matches the intended retrieval behavior.
index = VectorStoreIndex(base_nodes)
retriever = RecursiveRetriever(
    "vector",
    retriever_dict={"vector": index.as_retriever()},
    node_dict=node_dict,
    verbose=True,
)

# 5. Run the query and print the top matches.
query = "主要技术参数"
results = retriever.retrieve(query)
for node in results:
    # node.score may be None (e.g. for nodes fetched by id, not similarity);
    # formatting None with :.2f would raise TypeError.
    score = node.score if node.score is not None else 0.0
    print(f"检索到节点: {node.node_id} (分数: {score:.2f})")
    print(node.text[:200] + "...\n")