import asyncio
import logging
from typing import List, Dict, AsyncGenerator, Tuple, Optional

import openai
from openai import AsyncOpenAI

from app.common.global_vars import AI_API_CONFIGS
from app.common.shared_utils import TokenUsage

logger = logging.getLogger(__name__)

class APIManager:
    """Manages AI API clients with per-endpoint retry and ordered failover.

    Endpoints come from ``AI_API_CONFIGS`` (each entry needs ``name``,
    ``api_key`` and ``base_url``). Each endpoint is retried up to
    ``max_retries`` times with exponential backoff before failing over to
    the next one in the list.
    """

    def __init__(self, max_retries: int = 3, timeout: int = 60):
        """
        Args:
            max_retries: attempts per endpoint before switching to the next.
            timeout: request timeout in seconds (applied both at client
                construction and per call).
        """
        self.api_configs = AI_API_CONFIGS
        self.max_retries = max_retries
        self.timeout = timeout
        self.clients: Dict[str, AsyncOpenAI] = {}

        # Build one client per configured endpoint, keyed by its config name.
        for config in self.api_configs:
            self.clients[config["name"]] = AsyncOpenAI(
                api_key=config["api_key"],
                base_url=config["base_url"],
                timeout=timeout
            )

    async def call_api_with_retry(self, messages: List[Dict[str, str]],
                                 model: str = "gpt-4.1",
                                 stream: bool = True) -> AsyncGenerator[Tuple[str, Optional[TokenUsage]], None]:
        """Call the chat-completions API, retrying and failing over as needed.

        Yields ``(content, usage)`` tuples:
          * streaming: each content chunk is yielded with ``usage=None``,
            followed by a final ``("", TokenUsage)`` carrying the totals;
          * non-streaming: a single ``(full_content, TokenUsage)`` tuple.

        Raises:
            Exception: when every configured endpoint has been exhausted, or
                when a stream fails after content was already yielded (a retry
                at that point would duplicate output the caller has consumed,
                so the underlying error is re-raised instead).
        """
        api_index = 0

        while api_index < len(self.api_configs):
            config = self.api_configs[api_index]
            client = self.clients[config["name"]]
            retry_count = 0

            while retry_count < self.max_retries:
                # BUGFIX: once any chunk has been delivered downstream, a retry
                # would re-send it; track that so we re-raise instead.
                yielded_any = False
                try:
                    logger.info(f"尝试使用 {config['name']} (重试次数: {retry_count + 1})")

                    if stream:
                        async for chunk, usage in self._stream_response(client, messages, model):
                            yielded_any = True
                            yield chunk, usage
                        return  # stream completed successfully; exit both loops
                    else:
                        response = await client.chat.completions.create(
                            model=model,
                            messages=messages,
                            timeout=self.timeout
                        )

                        # BUGFIX: content may be None (e.g. refusal / tool-call
                        # responses); normalise to "" to honour the str contract.
                        content = response.choices[0].message.content or ""
                        usage = TokenUsage()
                        if response.usage:
                            usage.add_usage(
                                response.usage.prompt_tokens,
                                response.usage.completion_tokens
                            )

                        yield content, usage
                        return

                except Exception as e:
                    if yielded_any:
                        # Partial output already reached the caller; retrying
                        # would duplicate it. Surface the original error.
                        raise
                    retry_count += 1
                    logger.warning(f"{config['name']} 第{retry_count}次重试失败: {str(e)}")

                    if retry_count >= self.max_retries:
                        logger.error(f"{config['name']} 达到最大重试次数，切换到下一个API")
                        break  # give up on this endpoint, fail over

                    # Exponential backoff before the next attempt.
                    await asyncio.sleep(2 ** retry_count)

            api_index += 1

        # Every endpoint failed after all retries.
        raise Exception("所有API都已尝试完毕，全部失败")

    async def _stream_response(self, client: AsyncOpenAI, messages: List[Dict[str, str]],
                             model: str) -> AsyncGenerator[Tuple[str, Optional[TokenUsage]], None]:
        """Stream a chat completion, yielding chunks then a final usage tuple.

        Yields ``(chunk, None)`` for each non-empty content delta, then a
        terminal ``("", TokenUsage)``. When the provider returns no usage
        stats (despite ``stream_options={"include_usage": True}``), tokens
        are roughly estimated from whitespace-split word counts * 1.3.
        """
        try:
            stream = await client.chat.completions.create(
                model=model,
                messages=messages,
                stream=True,
                # Ask the API to append a usage chunk at end of stream.
                stream_options={"include_usage": True},
                timeout=self.timeout
            )

            total_content = ""
            usage = None

            async for chunk in stream:
                if chunk.choices and chunk.choices[0].delta:
                    content = chunk.choices[0].delta.content
                    if content:
                        total_content += content
                        yield content, None

                # Usage arrives in a trailing chunk when include_usage is set.
                if hasattr(chunk, 'usage') and chunk.usage:
                    usage = TokenUsage()
                    usage.add_usage(
                        chunk.usage.prompt_tokens,
                        chunk.usage.completion_tokens
                    )

            if usage is None:
                # Fallback estimate; for exact counts a tokenizer such as
                # tiktoken would be needed.
                usage = TokenUsage()
                estimated_input_tokens = sum(len(msg["content"].split()) for msg in messages) * 1.3
                estimated_output_tokens = len(total_content.split()) * 1.3
                usage.add_usage(int(estimated_input_tokens), int(estimated_output_tokens))

            # Terminal tuple carrying the token statistics.
            yield "", usage

        except Exception as e:
            logger.error(f"流式响应处理失败: {str(e)}")
            raise