import asyncio
import logging
import os
import time
from enum import Enum
from typing import Dict
from fastapi import WebSocket
from openai import OpenAI

logger = logging.getLogger(__name__)


class RateLimiter:
    """Sliding-window rate limiter for API calls.

    Keeps the timestamps of recent calls and, when `max_calls` would be
    exceeded within `time_window` seconds, sleeps until the oldest call
    falls out of the window.

    NOTE(review): not safe for concurrent acquire() from multiple tasks
    without an external lock — two tasks can both pass the count check.
    """

    def __init__(self, max_calls: int = 60, time_window: int = 60):
        self.max_calls = max_calls          # max calls allowed per window
        self.time_window = time_window      # window length in seconds
        self.calls: list = []               # timestamps of recent calls

    async def acquire(self):
        """Wait until a call slot is free, then record this call."""
        now = time.time()
        # Drop timestamps that have aged out of the window.
        self.calls = [t for t in self.calls if now - t < self.time_window]

        if len(self.calls) >= self.max_calls:
            # Wait until the oldest recorded call expires.
            wait_time = self.time_window - (now - self.calls[0])
            if wait_time > 0:
                logger.info(f"Rate limit reached, waiting {wait_time:.2f} seconds")
                await asyncio.sleep(wait_time)
            # BUGFIX: re-read the clock and re-prune after sleeping.
            # The original appended the stale pre-sleep timestamp and kept
            # the expired entries, leaving the window over-full so later
            # callers waited longer than necessary.
            now = time.time()
            self.calls = [t for t in self.calls if now - t < self.time_window]

        self.calls.append(now)

class AIClientManager:
    """Manage OpenAI-compatible clients for multiple AI service providers.

    Clients and their per-provider rate limiters are created lazily on
    first access and cached in class-level dicts.
    """

    # Provider name -> (env var holding the API key, base URL).
    _PROVIDERS: Dict[str, tuple] = {
        'deepseek': ('DEEPSEEK_API_KEY', "https://api.deepseek.com"),
        'openai': ('OPENAI_API_KEY', "https://api.openai.com/v1"),
        'laozhang': ('LAOZHANG_API_KEY', "https://api.laozhang.ai/v1"),
    }

    _clients: Dict[str, OpenAI] = {}
    _rate_limiters: Dict[str, RateLimiter] = {}

    @classmethod
    def _create_client(cls, name: str, api_key: str, base_url: str, **kwargs) -> OpenAI:
        """Create an OpenAI-compatible client for provider *name*.

        Raises whatever the OpenAI constructor raises after logging it.
        """
        try:
            client = OpenAI(
                api_key=api_key,
                base_url=base_url,
                timeout=300.0,     # long timeout: reasoning models stream slowly
                max_retries=1,     # retries are handled by invoke_with_backoff
                **kwargs
            )
            logger.info(f"Created AI client for {name}")
            return client
        except Exception as e:
            logger.error(f"Failed to create AI client for {name}: {e}")
            raise

    @classmethod
    def get_client(cls, name: str) -> OpenAI:
        """Return the cached client for *name*, creating it on first access.

        Raises:
            ValueError: if *name* is unknown or its API key env var is unset.
        """
        if name not in cls._clients:
            if name not in cls._PROVIDERS:
                raise ValueError(f"Unknown AI client: {name}")
            env_var, base_url = cls._PROVIDERS[name]
            # SECURITY FIX: the original shipped hard-coded fallback API keys
            # in source control. Committed secrets must be treated as leaked
            # and revoked; keys now come exclusively from the environment.
            api_key = os.getenv(env_var)
            if not api_key:
                raise ValueError(f"Missing API key for {name}: set the {env_var} environment variable")

            cls._clients[name] = cls._create_client(name, api_key, base_url)
            cls._rate_limiters[name] = RateLimiter()

        return cls._clients[name]

    @classmethod
    async def invoke_with_backoff(cls, client_name: str, **kwargs):
        """Call chat.completions.create with rate limiting and exponential backoff.

        Retries up to 3 times with delays of 1s, 2s; the final failure is
        logged and re-raised.
        """
        # BUGFIX: ensure the client (and its rate limiter) exists before
        # acquiring. The original touched _rate_limiters[client_name] first,
        # which raised KeyError for any provider never passed through
        # get_client().
        client = cls.get_client(client_name)
        await cls._rate_limiters[client_name].acquire()

        max_retries = 3
        base_delay = 1

        for attempt in range(max_retries):
            try:
                # Run the blocking SDK call off the event loop.
                return await asyncio.to_thread(lambda: client.chat.completions.create(**kwargs))
            except Exception as e:
                if attempt == max_retries - 1:
                    logger.error(f"Final attempt failed for {client_name}: {e}")
                    raise

                delay = base_delay * (2 ** attempt)
                logger.warning(f"Attempt {attempt + 1} failed for {client_name}, retrying in {delay}s: {e}")
                await asyncio.sleep(delay)


# Backward-compatible module-level client handles.
# NOTE(review): these are created eagerly at import time, which also creates
# the per-provider rate limiters that other functions in this module access
# directly via AIClientManager._rate_limiters.
client = AIClientManager.get_client('deepseek')
# NOTE(review): the name suggests Qwen, but this binds the 'openai' provider —
# confirm against callers.
client_qw = AIClientManager.get_client('openai')
client_gpt = AIClientManager.get_client('laozhang')


class AiApiConstants(Enum):
    """Model identifiers and endpoint URLs used by the AI invocation helpers."""
    DEEPSEEK_R1 = "deepseek-reasoner"   # DeepSeek reasoning model (default for invoke_openai_only_result)
    DEEPSEEK_V3 = "deepseek-chat"       # DeepSeek chat model (used for streaming in invoke_openai)
    DEEPSEEK_API_URL = "https://api.deepseek.com"
    API_ENDPOINT = "https://api.example.com"  # NOTE(review): placeholder, unused in this file — confirm before removing


async def invoke_openai_only_result(query: str, model: str = "deepseek-reasoner"):
    """Send *query* to the DeepSeek API and return the complete reply text.

    Non-streaming variant: the whole completion is awaited and the first
    choice's message content is returned. Failures are logged and re-raised.
    """
    try:
        response = await AIClientManager.invoke_with_backoff(
            'deepseek',
            model=model,
            messages=[{"role": "user", "content": query}],
        )
        return response.choices[0].message.content
    except Exception as e:
        logger.error(f"Failed to invoke DeepSeek API: {e}")
        raise


async def invoke_openai(query: str, ws: WebSocket):
    """Stream a DeepSeek chat completion to the given WebSocket.

    Each content delta is sent as a separate text frame. On failure, an
    "Error: ..." text frame is sent instead of raising.
    """
    messages = [{"role": "user", "content": query}]

    try:
        # BUGFIX: create/fetch the client BEFORE touching _rate_limiters.
        # The limiter for a provider is only created inside get_client(),
        # so the original order (acquire first) raised KeyError if
        # 'deepseek' had never been requested before.
        client = AIClientManager.get_client('deepseek')
        await AIClientManager._rate_limiters['deepseek'].acquire()

        # Run the blocking SDK call off the event loop.
        response = await asyncio.to_thread(
            lambda: client.chat.completions.create(
                model=AiApiConstants.DEEPSEEK_V3.value,
                messages=messages,
                stream=True
            )
        )

        async for chunk in _stream_response(response):
            if chunk.choices[0].delta.content:
                await ws.send_text(chunk.choices[0].delta.content)
                # Small pause to pace frontend rendering of the stream.
                await asyncio.sleep(0.03)

    except Exception as e:
        logger.error(f"Failed to invoke streaming DeepSeek API: {e}")
        await ws.send_text(f"Error: {str(e)}")


async def _stream_response(response):
    """将同步流式响应转换为异步生成器"""
    for chunk in response:
        yield chunk


async def invoke_laozhang_api(
        query: str,
        ws: WebSocket,
        model: str = "gpt-4o-mini",
        system_prompt: str = "You are a helpful assistant."
):
    """Stream a LaoZhang chat completion to the WebSocket, mirroring it to the console.

    Sends each content delta as an ``analysis_stream`` JSON message, then an
    ``analysis_stream_done`` message, and returns the full accumulated reply
    text. On failure, logs the error and sends an "Error: ..." text frame
    (returning None).
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query}
    ]
    try:
        client = AIClientManager.get_client('laozhang')
        await AIClientManager._rate_limiters['laozhang'].acquire()

        # Blocking SDK call runs in a worker thread.
        stream = await asyncio.to_thread(
            lambda: client.chat.completions.create(
                model=model,
                stream=True,
                messages=messages
            )
        )

        # Accumulate deltas for the console preview and the return value.
        parts = []

        async for chunk in _stream_response(stream):
            delta = chunk.choices[0].delta.content
            if delta:
                # Continuous console output, no newline.
                print(delta, end="", flush=True)

                parts.append(delta)

                # Forward each chunk to the frontend in real time.
                await ws.send_json({
                    "type": "analysis_stream",
                    "content": delta
                })

        full_text = "".join(parts)

        print("\n" + "=" * 80)  # end-of-output divider
        print("完整分析结果预览（控制台缓存）：")
        # Truncate the preview so a huge reply does not flood the console.
        preview = full_text[:3000]
        if len(full_text) > 3000:
            preview += "..."
        print(preview)

        await ws.send_json({
            "type": "analysis_stream_done"
        })
        return full_text

    except Exception as e:
        logger.error(f"Failed to invoke streaming LaoZhang API: {e}")
        await ws.send_text(f"Error: {str(e)}")


async def invoke_laozhangapi_only_result(
        query: str,
        model: str = "gpt-4o-mini",
        system_prompt: str = "You are a helpful assistant."
):
    """Send *query* to the LaoZhang API and return the complete reply text.

    Non-streaming variant: awaits the full completion and returns the first
    choice's message content. Failures are logged and re-raised.
    """
    try:
        response = await AIClientManager.invoke_with_backoff(
            'laozhang',
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query},
            ],
        )
        return response.choices[0].message.content
    except Exception as e:
        logger.error(f"Failed to invoke LaoZhang API: {e}")
        raise
