import dataclasses
import os
from typing import Optional

from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document, TextNode
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from pydantic import BaseModel

# NOTE(security): API keys were previously hard-coded in source. They remain
# as fallbacks so existing behavior is preserved, but they are now exposed and
# should be rotated and supplied via environment variables only.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# Make this embedding model the global default for all llama_index components.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

# DeepSeek chat model as the global default LLM.
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get(
        "DEEPSEEK_API_KEY",
        "sk-605e60a1301040759a821b6b677556fb",
    ),
)
Settings.llm = llm


def query_info(
        name: Optional[str] = None,
) -> str:
    """Look up personal information for a user by name.

    Args:
        name: The user name to look up; may be ``None``.

    Returns:
        A fixed greeting string ("你好") — this is a stub implementation
        that ignores ``name``.
    """
    # NOTE(review): leftover debug print — kept to preserve behavior, but
    # consider removing it or switching to logging.
    print('sss')
    return "你好"

# Source text the indexes below are built from (news report about
# typhoon Matmo / 麦德姆 making landfall).
text = """今天（10月5日）14时50分前后，
今年第21号台风“麦德姆”的中心以强台风级别在广东省湛江市徐闻县东部沿海登陆，
登陆时中心附近最大风力14级（42米/秒，强台风级），中心最低气压965百帕。"""
from llama_index.core.tools import FunctionTool, QueryEngineTool, RetrieverTool

# Expose query_info to the agent as a callable tool. return_direct=True makes
# the agent return the tool output verbatim instead of reasoning over it.
query_info_tool = FunctionTool.from_defaults(
    # Was an f-string with no placeholders; plain literal is equivalent.
    name="vector_tool_query_info",
    fn=query_info,
    return_direct=True,
)



# Wrap the news text in a single node; metadata rides along with the content.
node = TextNode(text=text, metadata={"title": "金秋十月"})
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.storage.index_store import SimpleIndexStore

from llama_index.core import StorageContext, load_index_from_storage

# NOTE(review): this storage context is constructed but never used below —
# kept for parity with the original script; wire it into an index or remove.
storage_context = StorageContext.from_defaults(
    docstore=SimpleDocumentStore(),
    vector_store=SimpleVectorStore(),
    index_store=SimpleIndexStore(),
)

nodes = [node]
# Keyword-table index over the single node (extracts keywords per node).
keyword_index = SimpleKeywordTableIndex(
    nodes,
    show_progress=True,
)

query_engine = keyword_index.as_query_engine()

# Query-engine tool the agent can call for questions about typhoon Matmo.
query_engine_tool = QueryEngineTool.from_defaults(
    query_engine,
    description="关于麦德姆的信息",
)

# Summary index + retriever exposed as a second tool over the same content.
summary_index = SummaryIndex(nodes)
list_retriever = summary_index.as_retriever()

list_tool = RetrieverTool.from_defaults(
    retriever=list_retriever,
    description="关于麦德姆的信息",
)


import asyncio

from llama_index.core.callbacks import (
    CallbackManager,
    LlamaDebugHandler,
    TokenCountingHandler,
)

# Wire up observability for every llama_index call: trace printing on
# completion, raw LLM prompt/response echoing, and token counting.
debug_handler = LlamaDebugHandler(print_trace_on_end=True)
simpleLLMHandler = SimpleLLMHandler()
tokenCountingHandler = TokenCountingHandler()

callback_manager = CallbackManager(
    handlers=[debug_handler, simpleLLMHandler, tokenCountingHandler]
)

# Install globally so all components report through these handlers.
Settings.callback_manager = callback_manager

class Company(BaseModel):
    """Structured output schema for the agent's answer.

    NOTE(review): the original summary said "companies mentioned", but the
    fields capture a date and a location — presumably extracted from the
    typhoon news text; confirm intent and consider renaming the model.
    """
    # Date string extracted from the answer (format not enforced here).
    date: str
    # Location string extracted from the answer.
    location: str

async def main():
    """Run the function agent once and print its structured answer."""
    agent = FunctionAgent(tools=[list_tool], output_cls=Company)
    result = await agent.run("查询百度相关的信息")
    print(result)


if __name__ == '__main__':
    # asyncio.run creates, drives, and closes the event loop for main().
    asyncio.run(main())
    # Moved inside the guard: previously this printed even on module import.
    print('okkk')

