import asyncio
import json
import time

from datasets import load_dataset

from lagent.agents.stream import AsyncAgentForInternLM, AsyncMathCoder, get_plugin_prompt
from lagent.llms import INTERNLM2_META
from lagent.llms.lmdeploy_wrapper import AsyncLMDeployClient, AsyncLMDeployServer

# Dedicated asyncio event loop, driven synchronously via run_until_complete below.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

# Settings for the locally served LMDeploy model, gathered in one dict so they
# are easy to tweak in a single place before constructing the server wrapper.
server_kwargs = dict(
    path='/root/autodl-tmp/models/internlm2_5-1_8b-chat',  # local model checkpoint
    meta_template=INTERNLM2_META,  # chat template for the InternLM2 family
    model_name='internlm-chat',
    tp=1,  # tensor-parallel degree
    top_k=1,  # keep only the single most likely candidate token
    temperature=1.0,  # sampling temperature
    stop_words=['<|im_end|>', '<|action_end|>'],  # generation stop tokens
    max_new_tokens=1024,  # cap on generated length
)
model = AsyncLMDeployServer(**server_kwargs)

# ----------------------- math solving / interpreter demo -----------------------
print('-' * 80, 'interpreter', '-' * 80)

# Pull the first 50 problems from the MATH benchmark's test split.
math_ds = load_dataset('lighteval/MATH', split='test')
problems = [record['problem'] for record in math_ds.select(range(50))]

"""
# 以下代码被注释掉，展示了如何进行批量数学问题求解

# 创建数学解题代理
coder = AsyncMathCoder(
    llm=model,
    interpreter=dict(type='AsyncIPythonInterpreter', max_kernels=250))

tic = time.time()
# 创建并发求解任务
coros = [coder(query, session_id=i) for i, query in enumerate(problems)]
res = loop.run_until_complete(asyncio.gather(*coros))
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')

# 保存解题步骤到文件
with open('./tmp_4.json', 'w') as f:
    json.dump([coder.get_steps(i) for i in range(len(res))],
              f,
              ensure_ascii=False,
              indent=4)
"""

# ----------------------- streaming chat demo -----------------------
async def streaming(llm, problem):
    """Stream a chat completion for *problem* and print chunks as they arrive.

    Args:
        llm: Async language model (server or client) exposing ``stream_chat``.
        problem: User question, sent as a single-turn chat message.
    """
    conversation = [{'role': 'user', 'content': problem}]
    try:
        # Chunks may arrive as raw strings or OpenAI-style dicts;
        # normalise each one to plain text before printing.
        async for chunk in llm.stream_chat(conversation):
            if isinstance(chunk, str):
                text = chunk
            elif isinstance(chunk, dict):
                if 'choices' in chunk:
                    text = chunk['choices'][0]['text']
                else:
                    text = chunk.get('text', '')
            else:
                continue  # unrecognised chunk type: skip, as the original did
            print(text, end='', flush=True)
    except Exception as e:
        # Best-effort demo: surface the failure instead of crashing the script.
        print(f"\n错误: {str(e)}")

# ----------------------- compare server vs. client streaming -----------------------
# BUG FIX: the original script used an undefined name ``client`` and crashed
# with NameError. Build the HTTP API client (the class is already imported at
# the top of the file) before it is exercised below.
client = AsyncLMDeployClient(
    url='http://127.0.0.1:23333',  # TODO(review): confirm the api_server endpoint/port
    model_name='internlm-chat',  # must match the name the server exposes
)

# Time one streamed answer through the in-process server backend.
print("\n测试服务器模式:")
tic = time.time()
loop.run_until_complete(streaming(model, problems[0]))
print(f"\n服务器模式耗时: {time.time() - tic:.2f}秒")

# Time the same question through the HTTP client backend for comparison.
print("\n测试客户端模式:")
tic = time.time()
loop.run_until_complete(streaming(client, problems[0]))
print(f"\n客户端模式耗时: {time.time() - tic:.2f}秒")

"""
# 以下代码被注释掉，展示了如何测试插件功能

# ----------------------- 测试插件功能 -----------------------
print('-' * 80, 'plugin', '-' * 80)
# 配置 Arxiv 搜索插件
plugins = [dict(type='AsyncArxivSearch')]
agent = AsyncAgentForInternLM(
    llm=model,
    plugins=plugins,
    aggregator=dict(
        type='InternLMToolAggregator',
        plugin_prompt=get_plugin_prompt(plugins)))

# 进行并发插件测试
tic = time.time()
coros = [
    agent(query, session_id=i)
    for i, query in enumerate(['LLM智能体方向的最新论文有哪些？'] * 50)
]
res = loop.run_until_complete(asyncio.gather(*coros))
print('-' * 120)
print(f'time elapsed: {time.time() - tic}')
"""
