import asyncio
import unittest

from langchain_core.messages import HumanMessage

from mcp_server_graph.utils.graph_service import GraphService
from mcp_server_graph.utils.text2cypher_service import _generate_llm
from mcp_server_graph.utils.timer_util import Timer


class TestGraphServiceOperations(unittest.TestCase):
    """Integration tests for GraphService and the text2cypher LLM helper.

    NOTE(review): these tests hit live backends (a graph database and an
    LLM endpoint) and are primarily timing probes; they are not isolated
    unit tests and will fail without those services available.
    """

    def test_get_rand_users(self):
        """Fetch 10 random users and verify exactly 10 come back."""
        graph_service = GraphService()
        users = graph_service.get_rand_users(10)
        self.assertEqual(len(users), 10)

    def test_llm(self):
        """Time LLM init, sync generate, async invoke, and async streaming.

        Fix: the original version only printed results and asserted
        nothing, so a degenerate (empty/None) LLM response went
        undetected. Each stage now asserts it produced a result.
        """
        # Measure how long constructing the LLM client takes.
        init_timer = Timer("初始化llm")
        init_timer.start()
        llm = _generate_llm()
        init_timer.end()
        self.assertIsNotNone(llm)

        # Synchronous batch generation.
        generate_timer = Timer("llm generate")
        generate_timer.start()
        messages = [[HumanMessage(content="介绍一下鸟巢,100字")]]
        result = llm.generate(messages)
        print(result)
        generate_timer.end()
        self.assertIsNotNone(result)

        # Asynchronous single invocation.
        ainvoke_timer = Timer("llm ainvoke")
        ainvoke_timer.start()
        response = asyncio.run(llm.ainvoke("介绍一下鸟巢,100字"))
        print(response)
        ainvoke_timer.end()
        self.assertIsNotNone(response)

        # Asynchronous token streaming; chunks are collected so the
        # streamed output can be asserted after the loop.
        astream_timer = Timer("llm astream")
        astream_timer.start()

        async def stream_response(prompt):
            # Print tokens as they arrive and return the full text.
            chunks = []
            print("开始生成回答：")
            async for chunk in llm.astream(prompt):
                print(chunk.content, end="", flush=True)
                chunks.append(chunk.content)
            print()  # newline after the streamed output
            return "".join(chunks)

        streamed = asyncio.run(stream_response("介绍一下鸟巢,100字"))
        astream_timer.end()
        self.assertTrue(streamed)
