import os

from openai import OpenAI

# Base URL of the DashScope OpenAI-compatible endpoint.
API_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
# Security fix: the API key was previously hardcoded in source (credential
# leak). Read it from the environment instead; empty string when unset so
# the module still imports and the error surfaces at call time.
API_KEY = os.environ.get("DASHSCOPE_API_KEY", "")
class LLMClient:
    """Thin wrapper around an OpenAI-compatible chat-completions endpoint."""

    def __init__(self, api_key=API_KEY, base_url=API_BASE_URL):
        """Initialize the LLM client.

        Args:
            api_key: API key for the OpenAI-compatible service.
            base_url: Base URL of the OpenAI-compatible endpoint.
        """
        self.client = OpenAI(
            api_key=api_key,
            base_url=base_url,
        )

    def chat(self, messages, model="qwen-turbo-2024-06-24"):
        """Stream a chat completion, echoing content to stdout as it arrives.

        Args:
            messages: List of {"role": ..., "content": ...} message dicts.
            model: Model identifier to request.

        Returns:
            tuple: (content, reasoning_content). Both are empty strings on
            error; reasoning_content is empty when the model streams no
            separate reasoning channel.
        """
        try:
            response = self.client.chat.completions.create(
                model=model,
                messages=messages,
                stream=True,  # stream tokens as they are generated
            )

            full_content = ""
            full_reasoning = ""
            pending = ""      # streamed text not yet flushed to stdout
            line_length = 50  # maximum characters printed per line

            for chunk in response:
                if not (hasattr(chunk, "choices") and chunk.choices):
                    continue
                delta = chunk.choices[0].delta
                # Some models (e.g. qwen reasoning variants) stream a
                # separate reasoning channel; collect it when present.
                # NOTE(review): attribute name per DashScope compatible
                # mode — confirm against the deployed model.
                reasoning_piece = getattr(delta, "reasoning_content", None)
                if reasoning_piece:
                    full_reasoning += reasoning_piece
                if hasattr(delta, "content") and delta.content:
                    full_content += delta.content
                    pending = self._print_wrapped(
                        pending, delta.content, line_length
                    )

            if pending:
                print(pending, end='', flush=True)
            print()  # terminate the final (partial) line

            if full_reasoning:
                print(f"LLM推理内容: {full_reasoning}")
            return full_content, full_reasoning

        except Exception as e:
            # Boundary-level catch: report the failure and return an empty
            # result rather than crash the caller.
            print(f"LLM调用出错: {str(e)}")
            return "", ""

    @staticmethod
    def _print_wrapped(pending, chunk, width):
        """Print complete lines of at most *width* chars losslessly.

        Appends *chunk* to the buffered *pending* text, prints every full
        line (with a trailing newline), and returns the remaining partial
        line for the caller to buffer. No characters are ever dropped,
        regardless of where a chunk falls relative to a line boundary.

        Args:
            pending: Text carried over from previous chunks (< width chars).
            chunk: Newly received text to emit.
            width: Maximum characters per printed line; must be > 0.

        Returns:
            str: The leftover partial line (shorter than width).
        """
        pending += chunk
        while len(pending) >= width:
            print(pending[:width], flush=True)
            pending = pending[width:]
        return pending


# Usage example: send a single greeting and print the full reply.
if __name__ == "__main__":
    demo_messages = [{"role": "user", "content": "你好"}]
    client = LLMClient()
    response = client.chat(demo_messages)
    print(f"响应: {response}")