import os
import json
from openai import OpenAI, APIError, APIConnectionError, AuthenticationError, RateLimitError

class LLMClient:
    """Thin wrapper around an OpenAI-compatible chat-completions endpoint."""

    def __init__(self, api_key: str = None, model: str = None):
        """
        Initialize the client.

        Args:
            api_key: explicit API key; falls back to the LLM_API_KEY
                environment variable when not provided.
            model: model name; falls back to the LLM_MODEL environment
                variable when not provided.

        Raises:
            ValueError: if no API key is available from either source.
        """
        # BUG FIX: the explicit constructor arguments were previously ignored
        # in favor of the environment variables; honor them first and use the
        # env vars only as a fallback.
        self.api_key = api_key or os.getenv("LLM_API_KEY")
        self.model = model or os.getenv("LLM_MODEL")

        if not self.api_key:
            raise ValueError("API密钥未提供，请传入api_key参数或设置LLM_API_KEY环境变量")

    def get_response(self, messages: list[dict[str, str]], stream: bool = False) -> str:
        """
        Send a request to the LLM and return the response text.

        Args:
            messages: conversation history, formatted as
                [{"role": "user", "content": "..."}, ...].
            stream: whether to request streaming output. The streamed deltas
                are accumulated and the full text is still returned at the end.

        Returns:
            The complete response text.

        Raises:
            AuthenticationError, APIConnectionError, RateLimitError, APIError:
                re-raised to the caller after printing a diagnostic message.
        """
        try:
            client = OpenAI(
                base_url='https://api.siliconflow.cn/v1',
                api_key=self.api_key
            )

            # BUG FIX: `stream` was accepted and documented but never
            # forwarded to the API; pass it through and handle both modes.
            response = client.chat.completions.create(
                model=self.model,
                messages=messages,
                stream=stream,
            )

            if stream:
                # Streaming mode: concatenate the incremental content deltas.
                parts = []
                for chunk in response:
                    if chunk.choices and chunk.choices[0].delta.content:
                        parts.append(chunk.choices[0].delta.content)
                return "".join(parts)

            if response.choices and response.choices[0]:
                return response.choices[0].message.content
            else:
                return "未获取到有效响应内容"

        except AuthenticationError:
            print("认证失败: 请检查API密钥是否正确")
            raise
        except APIConnectionError:
            print("连接失败: 无法连接到LLM服务，请检查网络或服务地址")
            raise
        except RateLimitError:
            print("速率限制: 已超过API调用限额，请稍后再试")
            raise
        except APIError as e:
            print(f"API错误: {str(e)}")
            raise
        except Exception as e:
            print(f"发生未知错误: {str(e)}")
            raise