import asyncio
import datetime
import os
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.extractors import KeywordExtractor
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import TextNode, NodeWithScore
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel

# Configure the global embedding model used by llama_index.
# SECURITY: the ZhipuAI API key was previously hardcoded (and therefore leaked
# in version control). Prefer the ZHIPUAI_API_KEY environment variable; the old
# literal is kept only as a backward-compatible fallback — rotate/revoke it.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.getenv(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class
    # of models, you can specify the size
    # of the embeddings you want returned.
    # dimensions=1024
)
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# Configure the global LLM used by llama_index.
# SECURITY: the DeepSeek API key was previously hardcoded (leaked in source).
# Prefer the DEEPSEEK_API_KEY environment variable; the old literal remains
# only as a backward-compatible fallback — rotate/revoke it.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.getenv("DEEPSEEK_API_KEY", "sk-605e60a1301040759a821b6b677556fb"),
)
Settings.llm = llm

from llama_index.core.base.base_selector import (
    BaseSelector,
    MultiSelection,
    SingleSelection,
    SelectorResult,
)
from llama_index.core.selectors.embedding_selectors import EmbeddingSingleSelector
from llama_index.core.selectors.llm_selectors import (
    LLMMultiSelector,
    LLMSingleSelector,
)
from llama_index.core.selectors.pydantic_selectors import (
    PydanticMultiSelector,
    PydanticSingleSelector,
)
# TODO: Deprecated import support for old text splitters
from llama_index.core.node_parser.text.code import CodeSplitter
from llama_index.core.node_parser.text.sentence import (
    SentenceSplitter,

)
from llama_index.core.node_parser.text.token import TokenTextSplitter

# Sample corpus: a short Chinese description of LlamaIndex's
# LLMQuestionGenerator component, wrapped in a single Document.
docs = [
    Document(
        text=(
            "LlamaIndex中的LLMQuestionGenerator是一个用于生成子问题的关键组件，"
            "主要应用于检索增强生成(RAG)流程中的查询分解场景。该组件通过大语言模型(LLM)将复杂查询拆解为多"
            "个逻辑相关的子问题，从而提升检索和回答的精准度"
        )
    )
]

# Chunk the document into small, overlapping sentence-level pieces
# (50-token chunks with a 20-token overlap) and show the result.
sentenceSplitter = SentenceSplitter(chunk_size=50, chunk_overlap=20)
nodes = sentenceSplitter.get_nodes_from_documents(docs)
print(nodes)

