"""
OpenAI LLM提供商实现
"""
import json
import time
from typing import Dict, Any, Optional
from .base import ProviderBase
from loguru import logger

# 导入HTTP客户端
from storyforge.utils.http_client import HTTPClient


class LLMOpenAIProvider(ProviderBase):
    """OpenAI LLM provider (synchronous Chat Completions API)."""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the OpenAI provider.

        Args:
            config: Provider configuration. Must contain ``apiKey`` (or
                ``apiSecret``); may contain ``model``, ``base_url`` /
                ``endpoint``, ``proxy``, ``timeout``, ``maxRetries``,
                ``temperature`` and ``maxTokens`` / ``maxOutputTokens``.

        Raises:
            ValueError: If neither ``apiKey`` nor ``apiSecret`` is present.
        """
        super().__init__(config)

        # Pull fallback defaults from the global LLM configuration.
        from storyforge.config import get_config
        llm_config = get_config().llm

        # Accept either "apiKey" or "apiSecret" as the credential field.
        self.api_key = config.get("apiKey") or config.get("apiSecret")
        # Fail fast, before building any HTTP client.
        if not self.api_key:
            raise ValueError("OpenAI配置缺少apiKey或apiSecret")

        self.model = config.get("model", "gpt-4o-mini")

        # Resolve the API base URL; strip a trailing slash so that
        # f"{base_url}/chat/completions" never produces a double slash.
        base_url = config.get("base_url") or config.get("endpoint") or "https://api.openai.com/v1"
        self.base_url = base_url.rstrip("/")

        # Provider-level values override the global llm config. Use explicit
        # None checks (not `or`) so falsy-but-valid values — e.g. a
        # configured temperature of 0.0 — are not silently replaced.
        self.timeout = self._first_not_none(
            config.get("timeout"), llm_config.requestTimeout
        )
        self.max_retries = self._first_not_none(
            config.get("maxRetries"), llm_config.maxRetries
        )
        self.default_temperature = self._first_not_none(
            config.get("temperature"), llm_config.defaultTemperature
        )
        self.default_max_tokens = self._first_not_none(
            config.get("maxTokens"),
            config.get("maxOutputTokens"),
            llm_config.defaultMaxTokens,
        )

        # Build the optional proxy configuration and the HTTP client.
        proxy_config = self._build_proxy_config(config)
        self.http_client = HTTPClient({
            "proxy": proxy_config,
            "timeout": self.timeout,
        })

    @staticmethod
    def _first_not_none(*values):
        """Return the first argument that is not None (or None if all are)."""
        for value in values:
            if value is not None:
                return value
        return None

    @staticmethod
    def _inject_proxy_auth(proxy_dict: Dict[str, str],
                           username: Optional[str],
                           password: Optional[str]) -> Dict[str, str]:
        """
        Embed ``username:password`` credentials into each proxy URL.

        Mutates and returns *proxy_dict*. No-op unless both credentials
        are truthy.
        """
        if username and password:
            for scheme, url in proxy_dict.items():
                # Only the scheme separator should be rewritten.
                proxy_dict[scheme] = url.replace(
                    "://", f"://{username}:{password}@", 1
                )
        return proxy_dict

    def _build_proxy_config(self, config: Dict[str, Any]) -> Optional[Dict[str, str]]:
        """
        Build a proxy mapping for the HTTP client.

        Provider-level proxy settings take precedence over the global
        proxy configuration.

        Args:
            config: Provider configuration; may contain a ``proxy`` dict
                with ``http``/``https`` URLs and optional
                ``username``/``password``.

        Returns:
            A dict like ``{"http": "http://proxy:port", "https": ...}``
            with credentials embedded when given, or None when no proxy
            is configured.
        """
        provider_proxy = config.get("proxy")
        if isinstance(provider_proxy, dict):
            proxy_dict = {
                scheme: provider_proxy[scheme]
                for scheme in ("http", "https")
                if provider_proxy.get(scheme)
            }
            proxy_dict = self._inject_proxy_auth(
                proxy_dict,
                provider_proxy.get("username"),
                provider_proxy.get("password"),
            )
            if proxy_dict:
                return proxy_dict

        # Fall back to the global proxy configuration, if any.
        from storyforge.config import get_config
        global_proxy = getattr(get_config(), "proxy", None)
        if global_proxy:
            proxy_dict = {
                scheme: url
                for scheme, url in (("http", global_proxy.http),
                                    ("https", global_proxy.https))
                if url
            }
            proxy_dict = self._inject_proxy_auth(
                proxy_dict, global_proxy.username, global_proxy.password
            )
            if proxy_dict:
                return proxy_dict

        return None

    def name(self) -> str:
        """Return the provider identifier."""
        return "openai"

    def submit(self, payload: Dict[str, Any]) -> str:
        """
        Submit a text-generation task (synchronous call; the result is
        available immediately).

        The full API response is stashed into ``payload["_result"]`` and
        the generated job id into ``payload["_job_id"]`` for later use.

        Args:
            payload: Dict with ``messages`` and optional ``temperature``,
                ``max_tokens`` and ``response_format``.

        Returns:
            A synthetic job id (timestamp-based, since the call is
            synchronous and OpenAI returns no polling handle).

        Raises:
            Exception: Re-raises any HTTP/parsing error after logging it.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # Prefer the caller's values, then the configured defaults, then
        # conservative hard-coded fallbacks.
        temperature = payload.get("temperature")
        if temperature is None:
            temperature = self._first_not_none(self.default_temperature, 0.7)
        max_tokens = payload.get("max_tokens")
        if max_tokens is None:
            max_tokens = self._first_not_none(self.default_max_tokens, 2000)

        data = {
            "model": self.model,
            "messages": payload.get("messages", []),
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        # Only send response_format when explicitly requested: an explicit
        # JSON null for this field is rejected by the API.
        response_format = payload.get("response_format")
        if response_format is not None:
            data["response_format"] = response_format

        try:
            # Request statistics, for logging only.
            messages = data.get("messages", [])
            total_input_chars = sum(len(msg.get("content", "")) for msg in messages)

            logger.debug(f"调用OpenAI API: {self.model}")
            logger.debug(f"  消息数: {len(messages)}, 输入字符: {total_input_chars}, 温度: {data['temperature']}")

            start_time = time.time()

            url = f"{self.base_url}/chat/completions"

            response = self.http_client.post(url, headers=headers, json_data=data, timeout=self.timeout)
            response.raise_for_status()

            elapsed_time = time.time() - start_time

            result = response.json()

            # Token accounting as reported by the API.
            usage = result.get("usage", {})
            prompt_tokens = usage.get("prompt_tokens", 0)
            completion_tokens = usage.get("completion_tokens", 0)
            total_tokens = usage.get("total_tokens", 0)

            # Output length, for logging only.
            content = ""
            if result.get("choices"):
                content = result["choices"][0].get("message", {}).get("content", "")

            logger.info("OpenAI API调用成功")
            logger.info(f"  模型: {self.model}")
            logger.info(f"  耗时: {elapsed_time:.2f}秒")
            logger.info(f"  输出: {len(content)} 字符")
            logger.info(f"  Token使用: 输入={prompt_tokens}, 输出={completion_tokens}, 总计={total_tokens}")

            # Performance trace (TRACE level) with the call duration bound.
            logger.bind(duration=elapsed_time).trace(
                f"API调用: OpenAI {self.model} | "
                f"输入tokens={prompt_tokens} | 输出tokens={completion_tokens}"
            )

            # Stash the result on the payload so callers (e.g. generate())
            # can retrieve it without a second request.
            job_id = f"openai_{int(time.time() * 1000)}"
            payload["_result"] = result
            payload["_job_id"] = job_id

            return job_id
        except Exception as e:
            logger.error(f"OpenAI API调用失败: {e}")
            logger.exception("API调用异常详情:")
            raise

    def poll(self, external_job_id: str) -> Dict[str, Any]:
        """
        Poll task status. OpenAI calls are synchronous, so every job is
        already finished; this always reports success.

        Args:
            external_job_id: Job id returned by submit() (unused).

        Returns:
            Status dict with ``status``, ``result_url`` and ``error``.
        """
        return {
            "status": "succeeded",
            "result_url": None,
            "error": None
        }

    def generate(self, messages: list, temperature: Optional[float] = None,
                 max_tokens: Optional[int] = None,
                 response_format: Optional[Dict[str, Any]] = None) -> str:
        """
        Generate text (convenience wrapper around submit()).

        Args:
            messages: Chat messages list.
            temperature: Sampling temperature (None -> configured default).
            max_tokens: Max output tokens (None -> configured default).
            response_format: Response format, e.g. {"type": "json_object"}.

        Returns:
            The generated message content string (not the raw API response).

        Raises:
            RuntimeError: If the API response carries no choices.
        """
        payload = {
            "messages": messages,
            "temperature": temperature if temperature is not None else self.default_temperature,
            "max_tokens": max_tokens if max_tokens is not None else self.default_max_tokens,
            "response_format": response_format
        }
        self.submit(payload)
        result = payload.get("_result")
        # Guard against a missing result AND an empty choices list
        # (the old `"choices" in result` check still allowed IndexError).
        if result and result.get("choices"):
            return result["choices"][0]["message"]["content"]
        raise RuntimeError("生成失败，未获取到结果")
