import os
from typing import Generator, List, Dict, Any

from openai import OpenAI


# NOTE: qwen_agent is not imported here; the local Assistant class defined
# below stands in for it with a plain OpenAI-compatible client.



def get_file_list(folder_path):
    """Recursively collect the full paths of all files under *folder_path*.

    Args:
        folder_path: Root directory to walk. If it does not exist,
            ``os.walk`` yields nothing and an empty list is returned.

    Returns:
        A list of full file paths; directories themselves are excluded.
    """
    # os.walk visits every subdirectory; flatten its (root, files) pairs
    # into full paths with a single comprehension instead of a manual
    # append loop.
    return [
        os.path.join(root, file)
        for root, _dirs, files in os.walk(folder_path)
        for file in files
    ]

# Collect every file under ./docs so their names can be offered to the
# assistant as reference context.
file_list = get_file_list("./docs")
print(file_list)

# Model configuration consumed by Assistant.__init__.
llm_cfg = {
    "model": "qwen-plus",
    "model_server": "dashscope",
    # Read from the environment so the key is never committed to source.
    "api_key": os.getenv("DASHSCOPE_API_KEY"),
    # Generation parameters. NOTE: this key was previously misspelled
    # "generta_cfg"; the configured top_p (0.7) equals the reader's
    # fallback default, so correcting the spelling does not change the
    # effective settings.
    "generate_cfg": {
        "top_p": 0.7
    },
}

class Assistant:
    """Minimal chat assistant over an OpenAI-compatible Chat Completions API.

    Injects the *names* of the reference files into the system prompt —
    file contents are never sent, to avoid over-long inputs — and forwards
    the conversation to the configured model.
    """

    def __init__(self, llm: Dict[str, Any], system_message: str, function_list: List[Any], files: List[str]):
        """Configure the underlying OpenAI client from an llm config dict.

        Args:
            llm: Config with keys ``model``, ``api_key``, ``model_server``
                and an optional ``generate_cfg`` sub-dict (the historical
                misspelling ``generta_cfg`` is also accepted).
            system_message: Base system prompt for every conversation.
            function_list: Tool list; stored for interface compatibility but
                not used by this class.
            files: Paths whose base names are listed in the system prompt.
        """
        self.system_message = system_message
        self.function_list = function_list or []
        self.files = files or []

        model = llm.get("model")
        # Fall back to the environment so the key need not live in the config.
        api_key = llm.get("api_key") or os.getenv("DASHSCOPE_API_KEY")
        model_server = llm.get("model_server", "dashscope")

        # DashScope exposes an OpenAI-compatible endpoint at this URL.
        base_url = None
        if model_server == "dashscope":
            base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"

        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.model = model
        # Read the generation config once; prefer the correct spelling,
        # then fall back to the typo "generta_cfg" used by older configs
        # (both previously-read lookups used the misspelled key).
        gen_cfg = llm.get("generate_cfg") or llm.get("generta_cfg") or {}
        self.temperature = gen_cfg.get("temperature", 0.7)
        self.top_p = gen_cfg.get("top_p", 0.7)

    def run(self, messages: List[Dict[str, str]]) -> Generator[List[Dict[str, str]], None, None]:
        """Send *messages* to the model and yield one assistant reply.

        Yields a single list containing the assistant message dict, matching
        the chunked-generator shape the caller iterates over.
        """
        # Offer only file *names* as context (contents are never read,
        # keeping the prompt short).
        files_context = "\n".join([f"- {os.path.basename(p)}" for p in self.files])
        system_prefix = self.system_message
        if files_context:
            system_prefix += f"\n\n可用的参考文件列表:\n{files_context}"

        # Prepend the system prompt to the caller-supplied conversation.
        model_messages = [{"role": "system", "content": system_prefix}] + messages

        # Call the model (OpenAI Chat Completions compatible endpoint).
        response = self.client.chat.completions.create(
            model=self.model,
            messages=model_messages,
            temperature=self.temperature,
            top_p=self.top_p,
        )
        # Guard against an empty choices list instead of raising IndexError.
        answer_text = response.choices[0].message.content if response.choices else ""

        yield [{"role": "assistant", "content": answer_text}]


## Build the agent: a domain-expert assistant over the docs folder.
bot = Assistant(
    llm=llm_cfg,
    system_message="你是一个保险专家,根据你的经验来准确回答用户的问题",
    function_list=[],
    files=file_list
)


# Interactive multi-turn chat loop; type "exit" to quit.
messages = []

while True:
    query = input("请输入问题：")
    if query == "exit":
        break
    # Record the user's turn before calling the model.
    messages.append({"role": "user", "content": query})
    reply_batches = list(bot.run(messages=messages))
    last_batch = reply_batches[-1]
    print("res_chunk_list:", last_batch)
    print("最终回答:", last_batch[0].get("content"))
    # Multi-turn: fold the assistant's reply back into the conversation
    # so it serves as context for the next question.
    messages.extend(last_batch)




