import json
import time
import logging
import uuid
import random
import string
import asyncio
from typing import Dict, Any, AsyncGenerator

from fastapi.responses import StreamingResponse, JSONResponse

from app.core.config import settings
from app.providers.base_provider import BaseProvider
from app.services.browser_manager import BrowserManager
from app.utils.sse_utils import create_sse_data, create_chat_completion_chunk, DONE_CHUNK

logger = logging.getLogger(__name__)

def generate_random_id(length: int = 16) -> str:
    """Return a random alphanumeric ID of the given length.

    Uses ``random.choices`` (non-cryptographic): these IDs are request/message
    correlation tokens for the upstream API, not security material.

    Args:
        length: Number of characters in the generated ID (default 16).

    Returns:
        A string of ``length`` characters drawn from [a-zA-Z0-9].
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))

class AIFreeForeverProvider(BaseProvider):
    """
    Provider for chat.aifreeforever.com backed by a persistent Playwright context.

    Strategy:
    - A page borrowed from ``BrowserManager`` performs a streaming ``fetch``
      against the upstream chat endpoint *inside the browser*.
    - The in-page JavaScript parses the upstream SSE stream and forwards each
      ``data:`` payload back to Python through exposed callback functions.
    - Python re-emits the payloads as OpenAI-compatible SSE chunks.

    Fixes over v12.2:
    - ``get_models`` no longer appends ``()`` to the evaluated arrow function.
      Playwright invokes function expressions itself; the extra call produced
      the "Unexpected token '('" syntax error that v12.2 had only fixed in
      ``_get_fetch_script``.
    - Stream callbacks are exposed under unique per-request names, because
      Playwright raises when the same name is exposed twice on a reused page
      and provides no API to remove a binding. The old "re-expose a no-op
      lambda" cleanup therefore always failed and has been removed.
    - SSE ``data:`` lines are sliced with ``substring(5)`` (the prefix is 5
      characters); ``substring(6)`` dropped the first payload character when
      no space followed the colon.
    """

    def __init__(self, browser_manager: BrowserManager):
        """Store the shared page pool and pre-build the upstream endpoint URLs."""
        self.browser_manager = browser_manager
        self.base_url = "https://chat.aifreeforever.com"
        self.completions_url = f"{self.base_url}/api/chat"
        self.models_url = f"{self.base_url}/api/models"
        # Simple in-memory cache for the /models response (1-hour TTL,
        # see get_models).
        self.models_cache = None
        self.models_last_updated = 0
        logger.info("AIFreeForeverProvider (Playwright-Persistent) v12.2 初始化成功。")

    async def chat_completion(self, request_data: Dict[str, Any]) -> StreamingResponse:
        """Proxy an OpenAI-style chat completion request as an SSE stream.

        Args:
            request_data: OpenAI chat-completions request body (``model``,
                ``messages``, ...).

        Returns:
            A ``StreamingResponse`` emitting OpenAI-compatible SSE chunks,
            terminated by a ``finish_reason="stop"`` chunk and ``[DONE]``.
        """

        async def stream_generator() -> AsyncGenerator[bytes, None]:
            request_id = f"chatcmpl-{generate_random_id(24)}"
            model_id = request_data.get("model", settings.DEFAULT_MODEL)
            page = None

            # Chunks forwarded by the in-page JS; ``None`` marks end-of-stream.
            data_queue: asyncio.Queue = asyncio.Queue()

            try:
                page = await self.browser_manager.get_page()

                async def on_stream_chunk(chunk: str):
                    await data_queue.put(chunk)

                async def on_stream_end():
                    await data_queue.put(None)

                # Pages are reused across requests and Playwright raises if the
                # same function name is exposed twice (there is no API to remove
                # a binding), so expose the callbacks under unique per-request
                # names. Stale bindings accumulate on the page but are harmless
                # no-ops once their request has completed.
                suffix = generate_random_id(8)
                chunk_fn_name = f"onStreamChunk_{suffix}"
                end_fn_name = f"onStreamEnd_{suffix}"
                await page.expose_function(chunk_fn_name, on_stream_chunk)
                await page.expose_function(end_fn_name, on_stream_end)

                payload = self._prepare_payload(request_data)
                js_code = self._get_fetch_script(
                    self.completions_url, payload, chunk_fn_name, end_fn_name
                )

                # Evaluate the JS as a background task so we can drain the
                # queue while the browser is still streaming.
                js_task = asyncio.create_task(page.evaluate(js_code))
                js_task.add_done_callback(
                    lambda t: logger.error("JS evaluation task failed!", exc_info=t.exception()) if t.exception() else None
                )

                # Consume forwarded SSE payloads until the JS signals the end.
                while True:
                    chunk_str = await data_queue.get()
                    if chunk_str is None:
                        break

                    try:
                        data = json.loads(chunk_str)
                    except json.JSONDecodeError:
                        logger.warning(f"无法解析从JS流收到的数据块: {chunk_str}")
                        continue
                    # Only "text-delta" events carry visible chat content;
                    # other event types (tool calls, metadata) are ignored.
                    if data.get("type") == "text-delta":
                        delta_content = data.get("delta")
                        if delta_content is not None:
                            chunk = create_chat_completion_chunk(request_id, model_id, delta_content)
                            yield create_sse_data(chunk)

                final_chunk = create_chat_completion_chunk(request_id, model_id, "", "stop")
                yield create_sse_data(final_chunk)
                yield DONE_CHUNK

            except Exception as e:
                # Surface the failure to the client as a terminal chunk so the
                # stream still ends with a well-formed [DONE].
                logger.error(f"处理流时发生错误: {e}", exc_info=True)
                error_message = f"内部服务器错误: {str(e)}"
                error_chunk = create_chat_completion_chunk(request_id, model_id, error_message, "stop")
                yield create_sse_data(error_chunk)
                yield DONE_CHUNK
            finally:
                # Exposed functions cannot be removed (unique names above make
                # that safe); just return the page to the pool.
                if page:
                    self.browser_manager.release_page(page)

        return StreamingResponse(stream_generator(), media_type="text/event-stream")

    def _get_fetch_script(
        self,
        url: str,
        payload: Dict,
        chunk_fn: str = "onStreamChunk",
        end_fn: str = "onStreamEnd",
    ) -> str:
        """Build the in-browser JS that streams ``url`` and relays SSE data.

        The returned source is a (non-invoked) async arrow function — Playwright
        invokes function expressions itself; appending ``()`` caused the
        "Unexpected token '('" syntax error.

        Args:
            url: Upstream endpoint to POST to.
            payload: JSON-serializable request body.
            chunk_fn: Name of the exposed callback receiving each ``data:``
                payload (defaults preserve the pre-fix binding names).
            end_fn: Name of the exposed callback signalling end-of-stream;
                always invoked, even on fetch errors.
        """
        return f"""
        async () => {{
            try {{
                const response = await fetch('{url}', {{
                    method: 'POST',
                    headers: {{
                        'Content-Type': 'application/json',
                        'Accept': '*/*'
                    }},
                    body: JSON.stringify({json.dumps(payload)})
                }});

                if (!response.ok) {{
                    console.error(`Fetch failed with status: ${{response.status}}`);
                    const errorBody = await response.text();
                    console.error(`Error body: ${{errorBody}}`);
                    await {end_fn}();
                    return;
                }}

                const reader = response.body.getReader();
                const decoder = new TextDecoder();
                let buffer = '';

                while (true) {{
                    const {{ done, value }} = await reader.read();

                    if (done) {{
                        // Flush a trailing line that arrived without a newline.
                        if (buffer && buffer.startsWith('data:')) {{
                            // 'data:' is 5 chars; trim() absorbs an optional space.
                            const data = buffer.substring(5).trim();
                            if (data && data !== '[DONE]') {{
                                await {chunk_fn}(data);
                            }}
                        }}
                        break;
                    }}

                    buffer += decoder.decode(value, {{ stream: true }});

                    let newlineIndex;
                    while ((newlineIndex = buffer.indexOf('\\n')) >= 0) {{
                        const line = buffer.substring(0, newlineIndex);
                        buffer = buffer.substring(newlineIndex + 1);

                        if (line.trim() === '') {{
                            continue;
                        }}

                        if (line.startsWith('data:')) {{
                            const data = line.substring(5).trim();
                            if (data && data !== '[DONE]') {{
                                await {chunk_fn}(data);
                            }}
                        }}
                    }}
                }}
            }} catch (e) {{
                console.error('Error during fetch stream:', e);
            }} finally {{
                await {end_fn}();
            }}
        }}
        """

    def _prepare_payload(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
        """Convert an OpenAI ``messages`` list into the upstream request shape.

        Each message becomes a ``parts``-based object with a fresh random id;
        the whole payload gets its own id and a ``submit-message`` trigger.
        """
        openai_messages = request_data.get("messages", [])

        formatted_messages = []
        for msg in openai_messages:
            formatted_messages.append({
                "parts": [{"type": "text", "text": msg.get("content")}],
                "id": generate_random_id(),
                "role": msg.get("role")
            })

        return {
            "modelId": request_data.get("model", settings.DEFAULT_MODEL),
            "id": generate_random_id(),
            "messages": formatted_messages,
            "trigger": "submit-message"
        }

    async def get_models(self) -> JSONResponse:
        """Return the upstream model list in OpenAI ``/v1/models`` format.

        Results are cached for one hour. On failure, falls back to
        ``settings.KNOWN_MODELS`` (and caches that fallback only if no prior
        successful fetch is cached).
        """
        if self.models_cache and (time.time() - self.models_last_updated < 3600):
            return JSONResponse(content=self.models_cache)

        page = None
        try:
            page = await self.browser_manager.get_page()

            # Function expression only — Playwright calls it; the previous
            # trailing "()" was a JS syntax error ("Unexpected token '('").
            response_json = await page.evaluate(f"""
                async () => {{
                    const response = await fetch('{self.models_url}');
                    return await response.json();
                }}
            """)

            formatted_data = {
                "object": "list",
                "data": [
                    {
                        "id": model.get("id"),
                        "object": "model",
                        "created": int(time.time()),
                        "owned_by": model.get("specification", {}).get("provider", "unknown")
                    }
                    for model in response_json.get("models", [])
                ]
            }
            self.models_cache = formatted_data
            self.models_last_updated = time.time()
            return JSONResponse(content=formatted_data)
        except Exception as e:
            logger.error(f"获取模型列表失败，将回退到默认列表: {e}")
            default_models = {
                "object": "list",
                "data": [
                    {"id": name, "object": "model", "created": int(time.time()), "owned_by": "lzA6-fallback"}
                    for name in settings.KNOWN_MODELS
                ]
            }
            if not self.models_cache:
                self.models_cache = default_models
            return JSONResponse(content=self.models_cache)
        finally:
            if page:
                self.browser_manager.release_page(page)
