from openai import OpenAI

class ChatGLM_LLM:
    """Multi-turn chat wrapper around an OpenAI-compatible endpoint (ZhipuAI GLM).

    Keeps a running ``dialogue_history`` (system + user + assistant turns) and
    sends the whole history on every request so the model has conversational
    context.
    """

    def __init__(self, config):
        """Create the client from a configuration mapping.

        Args:
            config: mapping with keys ``"model_name"``, ``"api_key"`` and
                ``"url"`` (the OpenAI-compatible base URL, e.g. the ZhipuAI
                PaaS endpoint). Missing keys default to ``None``.
        """
        self.model_name = config.get("model_name")
        self.api_key = config.get("api_key")
        self.url = config.get("url")
        self.client = OpenAI(api_key=self.api_key, base_url=self.url)
        # System prompt (Chinese): "You are a humorous assistant; please
        # answer in a witty way."
        self.dialogue_history = [
            {"role": "system", "content": "你是一个幽默的助手，请用风趣的方式回答"}
        ]

    def generate_response(self, user_info):
        """Send ``user_info`` to the model and return the reply text.

        On success both the user turn and the assistant reply are appended to
        ``self.dialogue_history`` so later calls carry full context. On any
        API/network failure the pending user turn is rolled back and a fixed
        fallback string is returned instead of raising.
        """
        self.dialogue_history.append({"role": "user", "content": user_info})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=self.dialogue_history,
            )
            answer = response.choices[0].message.content
            # Record the assistant turn; without this the model never sees
            # its own earlier replies and multi-turn context is lost.
            self.dialogue_history.append({"role": "assistant", "content": answer})
            return answer
        except Exception as e:
            # Roll back the failed user turn so a retry does not send a
            # duplicated/dangling message.
            self.dialogue_history.pop()
            # API / network error boundary: log and degrade gracefully.
            print(f"API调用失败: {str(e)}")
            return "抱歉，我现在无法回答这个问题"

