import httpx
import json
import uuid
import time
import logging
import asyncio
from typing import Dict, Any, Union, AsyncGenerator
from urllib.parse import quote_plus

from fastapi import Request, HTTPException
from fastapi.responses import StreamingResponse
from bs4 import BeautifulSoup
from markdownify import markdownify as md  # HTML -> Markdown converter

from app.providers.base_provider import BaseProvider
from app.core.config import settings

logger = logging.getLogger(__name__)

class How2SolutionsProvider(BaseProvider):
    """
    Provider for how2solutions.com (v2.4 - Markdown final edition).

    Forwards the latest user message to the site's WordPress admin-ajax
    endpoint, converts the HTML answer to Markdown, and replays it to the
    client as an OpenAI-compatible pseudo-stream (SSE).
    """

    def __init__(self):
        # Fail fast: every upstream request requires the AJAX nonce, so a
        # missing value would only surface later as opaque request failures.
        if not settings.HOW2SOLUTIONS_NONCE:
            error_msg = "配置错误: .env 文件中缺少 HOW2SOLUTIONS_NONCE。"
            logger.critical(error_msg)
            raise ValueError(error_msg)

    async def chat_completion(self, request_data: Dict[str, Any], original_request: Request) -> StreamingResponse:
        """
        Handle a chat request.

        Fetches the upstream answer, converts the returned HTML to Markdown,
        and streams the result back as OpenAI-style SSE chunks. Any failure is
        reported inside the stream (never raised to the caller), so clients
        that already started reading the response still get a message.
        """
        model = request_data.get("model", settings.DEFAULT_MODEL)
        try:
            # 1. Query the upstream service.
            upstream_response = await self._make_request(request_data)

            # 2. Validate the envelope before touching the payload.
            if not upstream_response.get("success") or "data" not in upstream_response:
                full_content = f"上游服务器错误: {upstream_response.get('data', '未知错误')}"
            else:
                html_content = upstream_response["data"].get("content", "")
                if not html_content:
                    full_content = ""
                else:
                    # Convert HTML to Markdown; ATX style yields standard
                    # '#'/'##' headings instead of underlined ones.
                    logger.info("正在将上游返回的 HTML 转换为 Markdown...")
                    full_content = md(html_content, heading_style="ATX")
                    logger.info(f"Markdown 转换完成，内容长度: {len(full_content)}")

            # 3. Replay the converted Markdown as a pseudo-stream.
            return StreamingResponse(
                self._pseudo_stream_generator(full_content, model),
                media_type="text/event-stream",
            )

        except Exception as e:
            logger.error(f"处理请求失败: {e}", exc_info=True)
            error_content = f"处理请求时发生内部错误: {e}"
            return StreamingResponse(
                self._pseudo_stream_generator(error_content, "error-model"),
                media_type="text/event-stream",
            )

    @staticmethod
    def _extract_last_user_message(request_data: Dict[str, Any]) -> str:
        """Return the content of the most recent user message, or '' if none.

        Uses .get() throughout so a malformed message dict (missing 'role' or
        'content') is skipped instead of raising KeyError.
        """
        for msg in reversed(request_data.get("messages", [])):
            if msg.get("role") == "user":
                return msg.get("content") or ""
        return ""

    async def _make_request(self, request_data: Dict[str, Any]) -> Dict:
        """POST the user's question to the upstream AJAX endpoint.

        Returns the parsed JSON response. Raises ValueError when no user
        message is present, httpx.HTTPStatusError on 4xx/5xx, and
        json.JSONDecodeError when the body is not valid JSON.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36",
            "Referer": "https://how2solutions.com/",
            "Origin": "https://how2solutions.com",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "X-Requested-With": "XMLHttpRequest",
        }

        last_user_message = self._extract_last_user_message(request_data)
        if not last_user_message:
            raise ValueError("请求中未找到用户消息。")

        # Let httpx form-encode the payload. The previous hand-built string
        # only encoded the question and interpolated the nonce raw, which
        # would corrupt the request if the nonce ever contained '&', '=' or '+'.
        form_data = {
            "action": "aikb_ask_question",
            "nonce": settings.HOW2SOLUTIONS_NONCE,
            "question": last_user_message,
        }

        api_url = "https://how2solutions.com/wp-admin/admin-ajax.php"

        async with httpx.AsyncClient(timeout=settings.API_REQUEST_TIMEOUT) as client:
            logger.info(f"向上游发送请求: {api_url} | 问题: {last_user_message[:50]}...")
            response = await client.post(api_url, headers=headers, data=form_data)

            response_text = response.text
            # Truncate the logged body: upstream answers can be very large.
            logger.info(f"上游服务器原始响应 (长度: {len(response_text)}): {response_text[:500]}")

            response.raise_for_status()

            return json.loads(response_text)

    async def _pseudo_stream_generator(self, full_content: str, model: str) -> AsyncGenerator[str, None]:
        """
        Async generator that replays a complete text as an OpenAI-compatible
        streaming response (chat.completion.chunk SSE events).
        """
        chat_id = f"chatcmpl-{uuid.uuid4().hex}"
        created_time = int(time.time())

        def _sse_event(delta: Dict[str, Any], finish_reason: Union[str, None]) -> str:
            """Wrap one delta into a serialized 'data: ...' SSE line."""
            payload = {
                "id": chat_id,
                "object": "chat.completion.chunk",
                "created": created_time,
                "model": model,
                "choices": [{"index": 0, "delta": delta, "finish_reason": finish_reason}],
            }
            # ensure_ascii=False: don't balloon CJK content into \uXXXX
            # escapes — smaller payloads and human-readable events.
            return f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"

        # 1. Opening chunk carries the assistant role.
        yield _sse_event({"role": "assistant", "content": ""}, None)

        # 2. Send the body in fixed-size slices with a tiny delay to mimic
        #    a typewriter effect.
        chunk_size = 1000
        for start in range(0, len(full_content), chunk_size):
            yield _sse_event({"content": full_content[start:start + chunk_size]}, None)
            await asyncio.sleep(0.001)

        # 3. Terminal chunk signals completion.
        yield _sse_event({}, "stop")

        # 4. OpenAI stream terminator.
        yield "data: [DONE]\n\n"
        logger.info("Markdown 伪流式响应发送完成。")

    async def get_models(self) -> Dict:
        """Return the supported model list in OpenAI /v1/models format."""
        created = int(time.time())  # one timestamp for the whole listing
        return {
            "object": "list",
            "data": [
                {"id": name, "object": "model", "created": created, "owned_by": "system"}
                for name in settings.KNOWN_MODELS
            ],
        }
