from typing import List, Dict, Any, Optional
from openai import OpenAI
from .base import BaseLLM
from ..server.config import get_settings

class OpenAILLM(BaseLLM):
    """LLM backend that answers prompts via the OpenAI chat-completions API."""

    def __init__(
        self,
        model: str = "gpt-3.5-turbo",
        temperature: float = 0.7,
        max_tokens: int = 1000
    ):
        """
        Initialize the OpenAI LLM client.

        Args:
            model: Chat model name passed to the completions endpoint.
            temperature: Sampling temperature (higher = more random output).
            max_tokens: Upper bound on tokens generated per response.
        """
        settings = get_settings()

        # Build the client from server-side settings; OPENAI_BASE_URL lets the
        # deployment point at a proxy or an OpenAI-compatible endpoint.
        self.client = OpenAI(
            api_key=settings.OPENAI_API_KEY,
            base_url=settings.OPENAI_BASE_URL
        )

        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens

    def generate(
        self,
        prompt: str,
        context: Optional[List[Dict[str, Any]]] = None,
        **kwargs
    ) -> str:
        """
        Generate an answer for ``prompt``, optionally grounded in ``context``.

        Args:
            prompt: The user question or instruction.
            context: Optional retrieved documents merged into the prompt via
                ``self._build_prompt`` (inherited — presumably defined on
                ``BaseLLM``; TODO confirm).
            **kwargs: Extra keyword arguments forwarded verbatim to
                ``chat.completions.create`` (e.g. ``top_p``, ``stop``).

        Returns:
            The model's reply text (stripped), or an error-description string
            if the API call fails.
        """
        # Merge retrieved context into the prompt only when it is provided.
        full_prompt = self._build_prompt(prompt, context) if context else prompt

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "你是一个专业的AI助手。"},
                    {"role": "user", "content": full_prompt}
                ],
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                **kwargs
            )

            # message.content may be None (e.g. tool-call responses); fall back
            # to "" so .strip() cannot raise AttributeError.
            content = response.choices[0].message.content
            return (content or "").strip()

        except Exception as e:
            # NOTE(review): swallowing the exception and returning the error
            # text as the "answer" is kept for backward compatibility, but
            # callers cannot distinguish a failure from real model output —
            # consider raising a domain exception instead.
            error_msg = f"OpenAI API调用出错: {str(e)}"
            print(error_msg)
            return error_msg
