import asyncio
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.node_parser import HierarchicalNodeParser, LlamaParseJsonNodeParser
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer, StorageContext, \
    VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel, Field

import os

# NOTE(review): a live API key was hard-coded here — that is a credential leak
# (anyone with repo access can bill this account). Read it from the environment
# instead; export ZHIPUAI_API_KEY before running, and revoke the leaked key.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get("ZHIPUAI_API_KEY", ""),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# Register as the process-wide default embedding model for llama-index.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek
import os

# NOTE(review): a live DeepSeek API key was hard-coded here — credential leak.
# Read it from the environment instead (export DEEPSEEK_API_KEY) and revoke
# the key that was committed.
llm = DeepSeek(model="deepseek-chat", api_key=os.environ.get("DEEPSEEK_API_KEY", ""))
# Register as the process-wide default LLM for llama-index.
Settings.llm = llm


from llama_index.core.node_parser.text.semantic_splitter import (
    SemanticSplitterNodeParser,
)
from llama_index.core.node_parser.relational.hierarchical import (
    HierarchicalNodeParser,
)
from llama_index.core.node_parser.relational.markdown_element import (
    MarkdownElementNodeParser,
)
from llama_index.core.node_parser.relational.unstructured_element import (
    UnstructuredElementNodeParser,
)
from llama_index.core.node_parser.relational.llama_parse_json_element import (
    LlamaParseJsonNodeParser,
)

# Sample Chinese text used as input for the splitter demos
# (the splitter code itself is currently disabled further below).
text='''
人无精神则不立，国无精神则不强。国家强盛、民族复兴需要物质文明的积累，更需要精神文明的升华。辉煌灿烂的中华文化，蕴含着培根铸魂、启智润心的深厚力量。
日前，淮北市相山区渠沟镇村民宋士佳、宋士奎和宋黎明三人共同施救落水市民。然而，宋士佳在施救后不幸离世。
'''
import os
import pickle
from pathlib import Path
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import UnstructuredElementNodeParser

# Load the HTML document (replace with the actual file path as needed).
html_docs = FlatReader().load_data(Path("./html/1.html"))

# Parse the raw document into element nodes (tables, text blocks, etc.).
element_parser = UnstructuredElementNodeParser()
parsed_nodes = element_parser.get_nodes_from_documents(html_docs)

# Report the parse result, previewing only the first three nodes.
print(f"共解析出 {len(parsed_nodes)} 个节点")
for idx, parsed in enumerate(parsed_nodes[:3]):
    print(f"\n节点 {idx+1} 类型: {type(parsed)}")
    print(f"内容摘要: {parsed.text[:100]}...")

# NOTE(review): everything below is disabled demo code, kept alive only as an
# unused module-level string literal (it is never executed). If re-enabled
# verbatim it would fail: it references names that are not imported or defined
# at module scope (SentenceWindowNodeParser, CodeSplitter, TokenTextSplitter)
# and prints `nodes` before any assignment. Either delete it or move each
# snippet into a runnable, properly-imported function. TODO confirm intent.
'''
documents = SimpleDirectoryReader("./json_data").load_data()

# 初始化解析器（可配置字段白名单/黑名单）
json_parser = LlamaParseJsonNodeParser()



print(nodes)


from llama_index.core.node_parser import SentenceSplitter, SimpleNodeParser
from llama_index.core import Document
sentenceSplitter=SentenceSplitter(chunk_size=50,chunk_overlap=10)

documents = SimpleDirectoryReader("./data").load_data()
node_parser=SemanticSplitterNodeParser.from_defaults(embed_model=embed_model,sentence_splitter=sentenceSplitter)

nodes=node_parser.get_nodes_from_documents(documents=documents)

print(len(nodes))



documents = SimpleDirectoryReader("./data").load_data()

node_parser = SentenceWindowNodeParser.from_defaults(
    window_size=2,
    window_metadata_key="window",
    original_text_metadata_key="original_text",

)
nodes=node_parser.get_nodes_from_documents(documents=documents)

for node in enumerate( nodes):
    first_node =node[1]
    print(first_node)
    print(f"核心句子: {first_node.get_content()}")
    print(f"窗口上下文: { first_node.metadata['window']}")
    print(f"原始文本: { first_node.metadata['original_text']}")


from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import Document
# 加载代码文档
document = Document(text="""\
class MyClass:
    def method1(self):
        print('Hello')

    # 重要注释
    def method2(self):
        return 42
""")

# 创建分割器（Python语言模式）
splitter = CodeSplitter(language="python")
nodes = splitter.get_nodes_from_documents([document])

for node in nodes:
    print(node)
    print("------------------")



splitter = SentenceSplitter(chunk_size=512, chunk_overlap=64)
document = Document(text="这是第一段。\n\n这是第二段。")
nodes = splitter.get_nodes_from_documents([document])

splitter =SentenceSplitter()

rs=splitter.split_text(text)

for item in rs:
    print(item)
    print("-----------------")
print(len(rs))


splitter =TokenTextSplitter(chunk_size=300,chunk_overlap=10)

rs=splitter.split_text(text)

print(len(rs))
'''



