"""AI service integration using LangChain.

Provides unified interface for LLM and image generation.
"""

from __future__ import annotations

import logging
from typing import Optional, Dict, Any, List, AsyncGenerator
from pathlib import Path
import json
import httpx
from datetime import datetime
import os
import uuid
from urllib.parse import urlparse, unquote
from pathlib import PurePosixPath

from langchain.llms.base import LLM
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain.callbacks.manager import CallbackManagerForLLMRun

from app.config import settings


logger = logging.getLogger(__name__)


class CustomLLM(LLM):
    """Custom LLM implementation for generic OpenAI-compatible API endpoints.

    Attributes:
        api_url: Full URL of the chat-completion endpoint.
        api_key: Bearer token sent in the Authorization header.
        model_name: Model identifier forwarded in the request body.
    """

    api_url: str
    api_key: str
    model_name: str

    @property
    def _llm_type(self) -> str:
        return "custom_llm"

    def _request_headers(self) -> Dict[str, str]:
        """Build the HTTP headers shared by sync and streaming calls."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    def _request_payload(
        self, prompt: str, stream: bool, **kwargs: Any
    ) -> Dict[str, Any]:
        """Build the request body shared by sync and streaming calls.

        Generic OpenAI-style chat format — adapt here if the upstream API
        expects a different schema.
        """
        return {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": kwargs.get("max_tokens", 2000),
            "temperature": kwargs.get("temperature", 0.7),
            "stream": stream,
        }

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the custom LLM API synchronously (non-streaming).

        Returns the model's text response. API failures are never raised to
        the caller; a Chinese-language error message string is returned
        instead (callers treat the return value as displayable text).
        """
        try:
            logger.info(f"Calling AI API: {self.api_url} with model: {self.model_name}")

            with httpx.Client() as client:
                response = client.post(
                    self.api_url,
                    headers=self._request_headers(),
                    json=self._request_payload(prompt, stream=False, **kwargs),
                    timeout=settings.AI_REQUEST_TIMEOUT,  # configured request timeout
                )
                response.raise_for_status()
                result = response.json()

                logger.info(
                    f"AI API call successful, response length: {len(str(result))}"
                )

                # Adapt response parsing based on your API format.
                if "choices" in result and len(result["choices"]) > 0:
                    return result["choices"][0]["message"]["content"]
                elif "response" in result:
                    return result["response"]
                else:
                    return str(result)

        except httpx.TimeoutException as e:
            logger.error(f"AI API timeout: {e}")
            return "抱歉，AI服务响应超时，请稍后再试。"
        except httpx.HTTPStatusError as e:
            logger.error(
                f"AI API HTTP error: {e.response.status_code} - {e.response.text}"
            )
            return f"抱歉，AI服务返回错误：{e.response.status_code}"
        except Exception as e:
            logger.error(f"LLM API call failed: {e}")
            return f"抱歉，AI服务暂时不可用。错误信息：{str(e)}"

    async def _stream_call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> AsyncGenerator[str, None]:
        """Stream tokens from the custom LLM API.

        Parses Server-Sent Events lines from the response and yields text
        chunks as they arrive. On failure, yields a single Chinese-language
        error message instead of raising.
        """
        try:
            logger.info(
                f"Starting stream call to AI API: {self.api_url} with model: {self.model_name}"
            )

            async with httpx.AsyncClient() as client:
                async with client.stream(
                    "POST",
                    self.api_url,
                    headers=self._request_headers(),
                    json=self._request_payload(prompt, stream=True, **kwargs),
                    timeout=settings.AI_REQUEST_TIMEOUT,
                ) as response:
                    response.raise_for_status()

                    async for line in response.aiter_lines():
                        # Only Server-Sent Events "data:" lines carry payload.
                        if not line.strip() or not line.startswith("data: "):
                            continue
                        data_str = line[6:]  # strip the "data: " prefix

                        if data_str.strip() == "[DONE]":
                            break

                        try:
                            data_json = json.loads(data_str)
                        except json.JSONDecodeError:
                            # Not JSON — forward the raw payload as-is.
                            if data_str.strip():
                                yield data_str.strip()
                            continue

                        # Adapt to different provider response formats.
                        if "choices" in data_json and len(data_json["choices"]) > 0:
                            choice = data_json["choices"][0]
                            if "delta" in choice and "content" in choice["delta"]:
                                content = choice["delta"]["content"]
                                if content:
                                    yield content
                            elif "text" in choice:
                                yield choice["text"]
                        elif "response" in data_json:
                            yield data_json["response"]

        except httpx.TimeoutException as e:
            logger.error(f"AI API stream timeout: {e}")
            yield "抱歉，AI服务响应超时，请稍后再试。"
        except httpx.HTTPStatusError as e:
            logger.error(
                f"AI API stream HTTP error: {e.response.status_code} - {e.response.text}"
            )
            yield f"抱歉，AI服务返回错误：{e.response.status_code}"
        except Exception as e:
            logger.error(f"LLM API stream call failed: {e}")
            yield f"抱歉，AI服务暂时不可用。错误信息：{str(e)}"


class AIService:
    """AI service for chat, interpretation, and text generation."""

    def __init__(self):
        # The LLM is only created when all API settings are present;
        # otherwise every entry point returns a "service not configured"
        # message instead of raising.
        self.llm = None
        if settings.AI_API_URL and settings.AI_API_KEY and settings.AI_MODEL_NAME:
            self.llm = CustomLLM(
                api_url=settings.AI_API_URL,
                api_key=settings.AI_API_KEY,
                model_name=settings.AI_MODEL_NAME,
            )

    def _build_chat_prompt(
        self,
        message: str,
        book_context: Optional[Dict[str, Any]] = None,
        chat_history: Optional[List[Dict[str, str]]] = None,
        system_prompt: Optional[str] = None,
        summary: Optional[str] = None,
        max_pairs: int = 6,
    ) -> str:
        """Assemble the full chat prompt shared by streaming and non-streaming chat.

        Behavior:
        - De-duplicates the current user message from history (a trailing
          pair with an empty response is dropped).
        - Keeps only the last ``max_pairs`` complete question/answer pairs.
        - Injects an optional custom system prompt, conversation summary,
          and book context.
        """
        # Default system prompt for plain Q&A (concise, no forced structure).
        default_prompt = "你是 AncientSeeker 的智能问答助手，AncientSeeker是将传统典籍与智能语义融合，打造沉浸式知识寻踪体验的平台。\n"
        active_system_prompt = (
            system_prompt.strip() if system_prompt else default_prompt
        )

        context_info = ""
        if book_context:
            title = book_context.get("title", "")
            author = book_context.get("author", "")
            dynasty = book_context.get("dynasty", "")
            context_info = f"\n\n讨论典籍：《{title}》（{author}，{dynasty}）"

        usable_pairs: List[Dict[str, str]] = []
        if chat_history:
            trimmed = chat_history[:]
            # Drop a trailing unfinished pair (response still empty).
            if trimmed and not trimmed[-1].get("response"):
                trimmed = trimmed[:-1]
            # Keep only complete pairs with both sides present.
            trimmed = [p for p in trimmed if p.get("message") and p.get("response")]
            if len(trimmed) > max_pairs:
                trimmed = trimmed[-max_pairs:]
            usable_pairs = trimmed

        history_context = ""
        if usable_pairs:
            history_context = "\n\n最近对话（按时间顺序）：\n"
            for pair in usable_pairs:
                history_context += (
                    f"用户：{pair['message']}\n助手：{pair['response']}\n\n"
                )

        summary_block = f"\n\n早期摘要：{summary}" if summary else ""

        return (
            f"{active_system_prompt}{context_info}{summary_block}{history_context}"
            f"\n\n用户：{message}\n"
            "请直接回答问题。"
        )

    async def chat_with_ai_stream(
        self,
        message: str,
        book_context: Optional[Dict[str, Any]] = None,
        chat_history: Optional[List[Dict[str, str]]] = None,
        system_prompt: Optional[str] = None,
        summary: Optional[str] = None,
        max_pairs: int = 6,
    ) -> AsyncGenerator[str, None]:
        """Streaming AI chat.

        Prompt construction is shared with :meth:`chat_with_ai` via
        :meth:`_build_chat_prompt`; see that method for the rules applied.
        Yields text chunks, or a single error message string on failure.
        """
        if not self.llm:
            yield "AI服务未配置，请检查配置文件。"
            return

        try:
            full_prompt = self._build_chat_prompt(
                message,
                book_context=book_context,
                chat_history=chat_history,
                system_prompt=system_prompt,
                summary=summary,
                max_pairs=max_pairs,
            )

            # Stream chunks straight through from the LLM.
            async for chunk in self.llm._stream_call(full_prompt):
                yield chunk

        except Exception as e:
            logger.error(f"AI stream chat failed: {e}")
            yield f"抱歉，处理您的问题时出现了错误：{str(e)}"

    async def interpret_text_stream(
        self,
        text: str,
        book_context: Optional[Dict[str, Any]] = None,
        question: Optional[str] = None,
    ) -> AsyncGenerator[str, None]:
        """Streaming classical-text interpretation.

        Builds a structured interpretation prompt (literal meaning, deeper
        meaning, key concepts, cultural background, modern relevance) and
        streams the LLM's answer. Yields an error message on failure.
        """
        if not self.llm:
            yield "AI服务未配置，请检查配置文件。"
            return

        try:
            book_info = ""
            if book_context:
                title = book_context.get("title", "")
                author = book_context.get("author", "")
                dynasty = book_context.get("dynasty", "")
                book_info = f"典籍：《{title}》（{author}，{dynasty}）\n\n"

            specific_question = f"\n\n特定问题：{question}" if question else ""

            prompt_text = f"""作为古典文献专家，请详细解读以下文本：
            
{book_info}文本内容："{text}"{specific_question}

请从以下几个方面进行解读：

## 文本解读
**字面含义：**
（请解释文本的直接意思）

**深层含义：**
（请阐述文本的哲学思想和深层内涵）

## 关键概念
（请列出并解释文本中的重要概念）

## 文化背景
（请介绍文本的历史文化背景）

## 现代意义
（请说明文本对现代人的启发和意义）

请用清晰、易懂的语言进行解读，避免JSON格式，直接以正文形式返回。"""

            # Stream chunks straight through from the LLM.
            async for chunk in self.llm._stream_call(prompt_text):
                yield chunk

        except Exception as e:
            logger.error(f"Stream text interpretation failed: {e}")
            yield f"文本解读时出现错误：{str(e)}"

    async def chat_with_ai(
        self,
        message: str,
        book_context: Optional[Dict[str, Any]] = None,
        chat_history: Optional[List[Dict[str, str]]] = None,
        system_prompt: Optional[str] = None,
        summary: Optional[str] = None,
        max_pairs: int = 6,
    ) -> Dict[str, Any]:
        """Non-streaming AI chat (same prompt rules as chat_with_ai_stream).

        Returns a dict with keys ``response``, ``references``,
        ``thinking_process`` and (on success) ``book_context``. Never raises.
        """
        if not self.llm:
            return {
                "response": "AI服务未配置，请检查配置文件。",
                "references": [],
                "thinking_process": None,
            }

        try:
            full_prompt = self._build_chat_prompt(
                message,
                book_context=book_context,
                chat_history=chat_history,
                system_prompt=system_prompt,
                summary=summary,
                max_pairs=max_pairs,
            )

            response = self.llm(full_prompt)

            return {
                "response": response,
                "references": self._extract_references(response),
                "thinking_process": None,
                "book_context": book_context,
            }

        except Exception as e:
            logger.error(f"AI chat failed: {e}")
            return {
                "response": f"抱歉，处理您的问题时出现了错误：{str(e)}",
                "references": [],
                "thinking_process": None,
            }

    async def generate_title(self, user_question: str, max_len: int = 12) -> str:
        """Generate a concise title for the first user question via the LLM.

        Rules:
        - At most ``max_len`` visible characters.
        - No surrounding quotes and no trailing punctuation.
        - Noun-phrase style summary; filler words such as "请帮我"/"如何"
          are stripped.
        - Falls back to truncating the question if the LLM call fails or
          returns an unusable response.
        """
        if not user_question:
            return "新对话"
        if not self.llm:
            # No LLM configured: fall back to a truncated question.
            return (user_question.strip().replace("\n", " ")[:max_len]).rstrip(
                "，。！？,. ;；："
            ) or "新对话"

        import asyncio
        import re

        # FIX: the length limit must reference max_len (the prompt previously
        # contained a mangled literal "58,560个汉字").
        base_prompt = (
            "你是一个标题生成助手。请阅读用户问题，输出一个简洁中文标题，不超过"
            f"{max_len}个汉字，禁止使用标点、引号、序号、括号、书名号、英文句号。\n"
            "直接输出标题本身，不要解释。若问题包含客套/助词(例如: 请、请问、帮我、是否能、可以、如何、能否)，请忽略这些词。"
            "\n用户问题：" + user_question.strip().replace("\n", " ") + "\n标题："
        )

        def _invoke():
            try:
                return self.llm(base_prompt, max_tokens=max_len * 4, temperature=0.2)  # type: ignore[call-arg]
            except Exception as e:  # pragma: no cover
                logger.error(f"Title generation failed: {e}")
                return ""

        try:
            # The synchronous LLM call is pushed to a worker thread so the
            # event loop stays responsive.
            loop = asyncio.get_running_loop()
            raw = await loop.run_in_executor(None, _invoke)
        except Exception as e:  # pragma: no cover
            logger.error(f"Async executor for title failed: {e}")
            raw = ""

        # FIX: guard against an empty LLM response — "".splitlines() is [],
        # so indexing [0] unconditionally raised IndexError.
        raw_lines = (raw or "").strip().splitlines()
        candidate = raw_lines[0] if raw_lines else ""
        # Strip common filler prefixes.
        candidate = re.sub(
            r"^(请问?|帮我|请你|能否|可以|如何|是否|请)[:：,，。\s]+", "", candidate
        )
        # Remove all common Chinese/English punctuation.
        candidate = re.sub(
            r"[，。！？!?:：；;、“”‘’'\"()（）\[\]{}<>《》·…—\-]", "", candidate
        )
        candidate = candidate.replace(" ", "")
        if len(candidate) > max_len:
            candidate = candidate[:max_len]
        candidate = candidate.strip()
        if not candidate:
            # LLM produced nothing usable: fall back to a truncated question.
            fallback = user_question.strip().replace("\n", " ") or "新对话"
            candidate = fallback[:max_len].rstrip("，。！？,. ;；：") or "新对话"
        return candidate or "新对话"

    # legacy interpret_text / generate_quiz removed

    def _extract_references(self, response: str) -> List[str]:
        """Extract cited work titles from a response (de-duplicated).

        Simple heuristic: anything inside Chinese title marks 《…》 or
        ASCII double quotes counts as a reference.
        """
        references = []
        import re

        patterns = [
            r"《([^》]+)》",  # Chinese title marks
            r'"([^"]+)"',  # ASCII double quotes
        ]

        for pattern in patterns:
            matches = re.findall(pattern, response)
            references.extend(matches)

        return list(set(references))  # de-duplicate


class ImageAIService:
    """AI image generation service built on a DashScope-style HTTP API via LangChain-era tooling.

    When configuration is incomplete or a request fails, callers receive a
    static placeholder image path rather than an exception.
    """

    def __init__(self):
        # Endpoint, credential and model name all come from application settings.
        self.api_url = settings.IMAGE_AI_API_URL
        self.api_key = settings.IMAGE_AI_API_KEY
        self.model_name = settings.IMAGE_AI_MODEL_NAME

        # Warn early if the configuration is incomplete.
        self._validate_config()

    def _validate_config(self):
        """Check configuration completeness; logs a warning, never raises."""
        if not all([self.api_url, self.api_key, self.model_name]):
            logger.warning("图像生成服务配置不完整，将使用占位符图片")

    async def generate_image(
        self,
        prompt: str,
        style: str = "古典",
        size: str = "1024*1024",
        user_id: int | None = None,
    ) -> Dict[str, Any]:
        """Generate an image, modeled on the dashscope request/poll workflow.

        Args:
            prompt: Text prompt describing the desired image.
            style: Semantic style keyword, blended into the prompt by
                ``_apply_style_to_prompt``.
            size: Output resolution string, e.g. "1024*1024".
            user_id: Optional user id used to save images into a per-user folder.

        Returns:
            Dict with ``image_url``, ``prompt``, ``style``, ``generation_time``
            and, on failure, an additional ``error`` key with a placeholder
            ``image_url``. Never raises.
        """
        if not all([self.api_url, self.api_key, self.model_name]):
            logger.warning("图像生成服务未配置，返回占位符")
            return {
                # Placeholder path: /static is mounted directly on the images
                # directory, so no extra /images segment is added.
                "image_url": "/static/placeholder.jpg",
                "prompt": prompt,
                "style": style,
                "generation_time": 0.0,
            }

        try:
            start_time = datetime.now()
            logger.info(f"开始生成图像，提示词: {prompt}，风格: {style}，尺寸: {size}")

            # Blend the requested style into the prompt text.
            styled_prompt = self._apply_style_to_prompt(prompt, style)

            # Call the image generation API.
            image_url = await self._call_image_generation_api(
                styled_prompt, size, user_id
            )
            # _call_image_generation_api already downloads/saves per user_id.

            generation_time = (datetime.now() - start_time).total_seconds()
            logger.info(f"图像生成完成，耗时: {generation_time:.2f}秒")

            return {
                "image_url": image_url,
                "prompt": prompt,
                "style": style,
                "generation_time": generation_time,
            }

        except Exception as e:
            logger.error(f"图像生成失败: {e}")
            return {
                "image_url": "/static/placeholder.jpg",
                "prompt": prompt,
                "style": style,
                "generation_time": 0.0,
                "error": str(e),
            }

    def _apply_style_to_prompt(self, prompt: str, style: str) -> str:
        """Blend the requested style into the prompt.

        Current implementation: map the semantic style passed by the frontend
        to a Chinese descriptive suffix appended to the prompt, while the
        request body sets parameters.style="auto" so the backend model decides
        on its own. If the model docs later define explicit style enums (e.g.
        'anime', 'photography'), return the raw prompt here and pass the enum
        value straight to parameters.style instead.
        """
        style_prompts = {
            "古典": "中国古典绘画风格，水墨画，传统艺术，精致细腻",
            "写实": "写实主义风格，高清摄影，细节丰富，真实感强",
            "抽象": "抽象艺术风格，现代艺术，创意表现，色彩丰富",
            "水墨": "中国水墨画风格，黑白灰调，意境深远，笔触流畅",
            "油画": "油画风格，质感丰富，色彩饱满，笔触明显",
            "素描": "素描风格，线条清晰，明暗对比，简洁优雅",
        }

        style_suffix = style_prompts.get(style, "古典风格")
        return f"{prompt}，{style_suffix}"

    async def _call_image_generation_api(
        self, prompt: str, size: str, user_id: int | None = None
    ) -> str:
        """Call the image generation API, supporting dashscope's async mode.

        Returns a local ``/static`` path when the image was downloaded, or the
        URL/path extracted from the response. Raises on unrecoverable errors.
        """
        try:
            # Build request headers per the official documentation format.
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
                "X-DashScope-Async": "enable",  # explicitly required by the official docs
            }

            # Build the request body exactly as the official docs specify.
            # Newer models (wan2.2-t2i-flash) accept/are compatible with the
            # same basic fields as wanx-v1; branch here if future docs diverge.
            request_data = {
                "model": self.model_name,
                "input": {"prompt": prompt},
                # style is no longer hard-coded to <auto>; the upstream
                # _apply_style_to_prompt blends style semantics into the prompt.
                # The (optional) style field is kept for future structured
                # styles; "auto" here lets the service infer on its own.
                "parameters": {"style": "auto", "size": size, "n": 1},
            }

            # Send the HTTP request.
            async with httpx.AsyncClient(timeout=settings.AI_REQUEST_TIMEOUT) as client:
                logger.info(f"调用图像生成API（异步模式）: {self.api_url}")
                response = await client.post(
                    self.api_url, headers=headers, json=request_data
                )

                # Check the HTTP status code.
                if response.status_code not in [200, 201, 202]:
                    logger.error(
                        f"API调用失败，状态码: {response.status_code}，响应: {response.text}"
                    )
                    # On a 403, retry once without async mode.
                    if response.status_code == 403:
                        return await self._try_sync_call(prompt, size, user_id)
                    raise Exception(f"API调用失败，状态码: {response.status_code}")

                result = response.json()
                logger.info(f"API响应成功: {result}")

                # Handle an asynchronous (task-based) response.
                if "output" in result and (
                    "task_id" in result["output"] or "id" in result
                ):
                    # Async mode: poll until the task completes.
                    task_id = result["output"].get("task_id") or result.get("id")
                    logger.info(f"异步任务已创建，task_id: {task_id}，开始轮询结果")
                    return await self._poll_async_result(task_id, user_id=user_id)

                # Synchronous response: parse it directly.
                image_url = await self._extract_image_url_from_response(result)

                # For remote URLs, download and store the image locally.
                if image_url.startswith("http"):
                    local_image_path = await self._download_and_save_image(
                        image_url, user_id
                    )
                    return local_image_path

                return image_url

        except httpx.TimeoutException as e:
            logger.error(f"图像生成API请求超时: {e}")
            raise Exception("图像生成请求超时，请稍后重试")
        except httpx.HTTPStatusError as e:
            logger.error(
                f"图像生成API HTTP错误: {e.response.status_code} - {e.response.text}"
            )
            raise Exception(f"图像生成服务错误: {e.response.status_code}")
        except Exception as e:
            logger.error(f"图像生成API调用异常: {e}")
            raise

    async def _try_sync_call(
        self, prompt: str, size: str, user_id: int | None = None
    ) -> str:
        """Fallback: retry the request synchronously (without async mode).

        Used when the async-mode request is rejected with 403.
        """
        try:
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            }

            request_data = {
                "model": self.model_name,
                "input": {"prompt": prompt},
                "parameters": {"size": size, "n": 1, "seed": 42},
            }

            async with httpx.AsyncClient(timeout=settings.AI_REQUEST_TIMEOUT) as client:
                logger.info(f"尝试同步调用图像生成API: {self.api_url}")
                response = await client.post(
                    self.api_url, headers=headers, json=request_data
                )

                if response.status_code != 200:
                    logger.error(f"同步调用也失败，状态码: {response.status_code}")
                    raise Exception(f"API调用失败，状态码: {response.status_code}")

                result = response.json()
                image_url = await self._extract_image_url_from_response(result)

                if image_url.startswith("http"):
                    return await self._download_and_save_image(image_url, user_id)
                return image_url

        except Exception as e:
            logger.error(f"同步调用失败: {e}")
            raise

    async def _poll_async_result(self, task_id: str, user_id: int | None = None) -> str:
        """Poll the async task result until it succeeds, fails, or times out.

        NOTE(review): the query URL is hard-coded to the DashScope endpoint —
        confirm this matches the configured api_url's provider.
        """
        try:
            # Build the task-status query URL.
            query_url = f"https://dashscope.aliyuncs.com/api/v1/tasks/{task_id}"
            headers = {"Authorization": f"Bearer {self.api_key}"}

            # Poll for up to ~60 seconds.
            import asyncio

            for attempt in range(30):  # 30 queries, 2 seconds apart
                async with httpx.AsyncClient() as client:
                    response = await client.get(query_url, headers=headers)

                    if response.status_code == 200:
                        result = response.json()
                        logger.info(f"轮询结果第{attempt + 1}次: {result}")

                        # Check the task status — dashscope response format.
                        if "output" in result:
                            status = result["output"].get("task_status")
                        else:
                            status = result.get("task_status") or result.get("status")

                        if status == "SUCCEEDED":
                            # Task succeeded: extract the image URL.
                            image_url = await self._extract_image_url_from_response(
                                result
                            )
                            if image_url.startswith("http"):
                                return await self._download_and_save_image(
                                    image_url, user_id
                                )
                            return image_url

                        elif status in ["FAILED", "CANCELED"]:
                            logger.error(f"异步任务失败，状态: {status}")
                            raise Exception(f"图像生成任务失败: {status}")

                        # Task still running: wait 2 seconds before the next query.
                        logger.info(f"任务运行中，状态: {status}，{attempt + 1}/30")
                        await asyncio.sleep(2)
                    else:
                        logger.error(f"查询任务状态失败: {response.status_code}")
                        await asyncio.sleep(2)

            # Timed out after all attempts.
            logger.error("异步任务查询超时")
            raise Exception("图像生成任务超时，请稍后重试")

        except Exception as e:
            logger.error(f"轮询异步结果失败: {e}")
            raise

    async def _extract_image_url_from_response(self, result: Dict[str, Any]) -> str:
        """Extract the image URL from an API response — supports several formats.

        Raises when no recognized field is found.
        """
        # dashscope image generation response format
        if "output" in result and "results" in result["output"]:
            results = result["output"]["results"]
            if isinstance(results, list) and len(results) > 0:
                return results[0].get("url", "")

        # OpenAI DALL-E format
        if (
            "data" in result
            and isinstance(result["data"], list)
            and len(result["data"]) > 0
        ):
            return result["data"][0].get("url", "")

        # Generic formats
        if "url" in result:
            return result["url"]

        if "image_url" in result:
            return result["image_url"]

        # Nothing matched: log and raise.
        logger.error(f"无法从API响应中提取图像URL，响应格式: {result}")
        raise Exception("API响应格式不支持，无法获取图像URL")

    async def _download_and_save_image(
        self, image_url: str, user_id: int | None = None
    ) -> str:
        """Download and save an image into a per-user directory.

        Directory layout:
            data/images/generated/<user_id>/<filename>
        Falls back to legacy data/images if user_id not provided.
        Returns: /static/generated/<user_id>/<filename>  (FastAPI static mount
        updated separately). On download failure, returns the original URL.
        """
        try:
            from urllib.parse import urlparse, unquote
            from pathlib import PurePosixPath
            import os
            import uuid

            # Create the image save directory.
            base_dir = Path("./data/images")
            # "generated" is the top-level isolation directory.
            if user_id is not None:
                image_dir = base_dir / "generated" / str(user_id)
            else:
                image_dir = base_dir
            os.makedirs(image_dir, exist_ok=True)

            # Derive a file name.
            try:
                # Try to extract the original file name from the URL.
                file_name = PurePosixPath(unquote(urlparse(image_url).path)).parts[-1]
                if not file_name or "." not in file_name:
                    # No usable name in the URL: generate one.
                    file_name = f"generated_image_{uuid.uuid4().hex[:8]}.jpg"
            except Exception:
                file_name = f"generated_image_{uuid.uuid4().hex[:8]}.jpg"

            file_path = image_dir / file_name

            # Download the image.
            async with httpx.AsyncClient() as client:
                response = await client.get(image_url, timeout=30)
                response.raise_for_status()

                # Save it locally.
                with open(file_path, "wb") as f:
                    f.write(response.content)
                logger.info(f"图像已保存到: {file_path}")

                # Return the relative path used for web access.
                # /static is mounted on the images directory, so legacy files
                # are served at /static/<file>; per-user files need the
                # /static/generated mount configured in main.py.
                if user_id is not None:
                    return f"/static/generated/{user_id}/{file_name}"
                return f"/static/{file_name}"

        except Exception as e:
            logger.error(f"下载并保存图像失败: {e}")
            # Download failed: fall back to the original remote URL.
            return image_url


# Global singleton service instances, shared across the application.
ai_service = AIService()
image_ai_service = ImageAIService()
