"""
AI服务模块，集成现有的AI服务封装
"""

import re
import asyncio
import logging
from typing import List, Dict, Optional, AsyncGenerator
from pydantic import BaseModel
from src.utils.http import ExamResult
from src.utils.config import DATA_DIR
from src.utils.http.answer.typings import ExamDetail


# Import the existing client_create function
from . import client_create

# Import the prompt-rendering helper
from .prompt import render_prompt

logger = logging.getLogger(__name__)
pattern = r"([。！？；：\n])"


class ChatMessage(BaseModel):
    """聊天消息模型"""

    role: str  # system, user, assistant
    content: str


class ChatRequest(BaseModel):
    """聊天请求模型"""

    messages: List[ChatMessage]
    exam_context: Optional[Dict] = None
    user_exam_id: str
    stream: bool = False


class ChatResponse(BaseModel):
    """聊天响应模型"""

    content: str
    usage: Optional[Dict] = None


class AIService:
    """AI service: builds prompts, calls the LLM, and formats replies.

    Wraps the project's ``client_create`` LLM client. Provides a
    one-shot ``chat()`` and a simulated-streaming ``chat_stream()``
    that yields a full response in punctuation-aligned chunks.
    """

    def __init__(self, llm_name: Optional[str] = None):
        """
        Initialize the AI service.

        Args:
            llm_name: Explicit LLM model name; when None, the default
                model from configuration is used.
        """
        self.llm_name = llm_name

    async def _prepare_messages(self, request: ChatRequest) -> List[Dict[str, str]]:
        """Build the message list sent to the LLM.

        The first entry is the rendered system prompt (template plus
        exam context and the ExamDetail JSON schema); the conversation
        history from the request follows.
        """
        # Diagnostics via the module logger instead of bare print().
        logger.debug("exam_context: %s", request.exam_context)
        messages: List[Dict[str, str]] = [
            {
                "role": "system",
                "content": render_prompt(
                    "learning_assistant.jinja",
                    model=ExamDetail.model_json_schema(),
                    exam_context=request.exam_context,
                ),
            }
        ]

        # Append the conversation history after the system prompt.
        messages.extend(
            {"role": msg.role, "content": msg.content} for msg in request.messages
        )
        logger.debug("prepared messages: %s", messages)
        return messages

    async def _get_ai_response(self, messages: List[Dict[str, str]]):
        """Call the underlying LLM client and return its raw response."""
        return await client_create(
            messages=messages,  # type: ignore
            max_tokens=1000,
            temperature=0.7,
            llm_name=self.llm_name,
        )

    async def chat(self, request: ChatRequest) -> ChatResponse:
        """
        Handle a one-shot chat request.

        Args:
            request: The chat request.

        Returns:
            ChatResponse: The model's reply with token usage; on any
            failure, a rendered fallback response without usage info.
        """
        try:
            messages = await self._prepare_messages(request)
            response = await self._get_ai_response(messages)

            # content may be None for some models; normalize to "".
            content = response.choices[0].message.content or ""
            # Hoist the repeated `response.usage` conditional.
            usage_info = response.usage
            usage = {
                "prompt_tokens": usage_info.prompt_tokens if usage_info else 0,
                "completion_tokens": usage_info.completion_tokens if usage_info else 0,
                "total_tokens": usage_info.total_tokens if usage_info else 0,
            }

            return ChatResponse(content=content, usage=usage)

        except Exception as e:
            # logger.exception records the traceback, unlike logger.error.
            logger.exception("AI聊天服务错误: %s", e)
            return ChatResponse(content=self._get_fallback_response(request))

    def _get_fallback_response(self, request: ChatRequest) -> str:
        """Render the fallback reply used when the LLM call fails."""
        return render_prompt(
            "fallback_response.jinja",
            user_input=request.messages[-1].content if request.messages else "",
        )

    async def chat_stream(self, request: ChatRequest) -> AsyncGenerator[str, None]:
        """
        Stream a chat response.

        The LLM is called once for the complete reply, which is then
        yielded in small chunks split on CJK punctuation to simulate
        token streaming while keeping sentences intact.

        Args:
            request: The chat request.

        Yields:
            str: Successive chunks of the response text.
        """
        try:
            messages = await self._prepare_messages(request)

            # Simulate streaming by chunking a single full response.
            logger.info("使用分块模拟流式输出")
            response = await self._get_ai_response(messages)
            full_content = response.choices[0].message.content or ""

            # `pattern` has a capturing group, so delimiters appear at
            # odd indices of the split result.
            sentences = re.split(pattern, full_content)
            current_chunk = ""

            for i, part in enumerate(sentences):
                current_chunk += part

                # Flush on a delimiter, or once the chunk is long enough.
                if (i % 2 == 1 and part in "。！？；：\n") or len(current_chunk) >= 10:
                    if current_chunk.strip():
                        yield current_chunk
                        current_chunk = ""
                        await asyncio.sleep(0.03)  # natural pause between chunks

            # Flush whatever remains after the loop.
            if current_chunk.strip():
                yield current_chunk

        except Exception as e:
            logger.exception("AI流式聊天服务错误: %s", e)
            yield self._get_fallback_response(request)


# 全局AI服务实例
ai_service = AIService()


async def get_ai_service() -> AIService:
    """获取AI服务实例"""
    return ai_service
