from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.llms import ChatMessage
from llama_index.core.agent.workflow import ReActAgent, FunctionAgent
from llama_index.core.workflow import Context
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai_like import OpenAILike
from llama_index.core.llms import ChatMessage, MessageRole,LLM
from asyncio import run
import uvicorn

from os import getenv
from pathlib import Path
import  asyncio

from DeepSeekChat import DeepSeekChat

# DeepSeek OpenAI-compatible chat client.
# SECURITY(review): an API key was previously hard-coded here (a leaked
# secret — it should be revoked). Read it from the environment instead.
deepseek_chat = DeepSeekChat(
    api_key=getenv("DEEPSEEK_API_KEY", ""),
    base_url="https://api.deepseek.com",
)

# --- Demo: seeding a ChatMemoryBuffer with a prior conversation. ---
memory = ChatMemoryBuffer.from_defaults(token_limit=40000)

chat_history = [
    ChatMessage(role="user", content="Hello, how are you?"),
    ChatMessage(role="assistant", content="I'm doing well, thank you!"),
]

# put a list of messages
memory.put_messages(chat_history)

history = memory.get()          # messages that fit within the token limit
all_history = memory.get_all()  # complete history, ignoring the limit

print(history)

# Fresh buffer for the agent run below — the demo buffer above is discarded.
memory = ChatMemoryBuffer.from_defaults(token_limit=40000)

agent = FunctionAgent(tools=[], llm=deepseek_chat)

# Context object holding the agent's chat history/state across runs.
ctx = Context(agent)

async def main():
    """Run a two-turn conversation with the agent, sharing memory/context.

    The second turn only works because the first turn's exchange is stored
    in the shared ``memory``/``ctx``, letting the agent recall the name.
    """
    resp = await agent.run("Hello,I am Jack", ctx=ctx, memory=memory)
    print(resp)  # was previously discarded without being used
    resp = await agent.run("Hello, who am i?", ctx=ctx, memory=memory)
    print(resp)
    # Dump the full accumulated conversation for inspection.
    print(memory.get_all())
   

# Usage example: start the async entry point when executed as a script.
if __name__ == "__main__":
    run(main())