from openai import OpenAI
import logging
from typing import Optional, Generator, AsyncGenerator
from fastapi.responses import StreamingResponse
import json
import asyncio
import httpx
import logging

from app.config import settings

logger = logging.getLogger(__name__)


class DeepSeekImgClient:
    """Client for the SiliconFlow OpenAI-compatible API, providing streaming
    vision-language (image + text) chat completions and a FastAPI SSE helper."""

    # Shared synchronous OpenAI SDK client; the API key is obtained from
    # https://cloud.siliconflow.cn/account/ak
    client = OpenAI(
        api_key=settings.DEEPSEEK_API_KEY,
        base_url="https://api.siliconflow.cn/v1"
    )

    # Default analysis prompt (the text that used to be hard-coded into the
    # request), used as a fallback when the caller passes an empty prompt.
    _DEFAULT_PROMPT = """请分析这张图片中的动物：
1. 这是什么动物？
2. 它的主要特征是什么？
3. 它属于什么科属？
4. 它的生活习性如何？
请用简洁的语言描述。"""

    async def chat_with_image_stream(
            self,
            image_base64: str,
            prompt: str,
            model: str = "Qwen/Qwen2-VL-72B-Instruct",
            detail: str = "low"
    ) -> AsyncGenerator[str, None]:
        """
        Stream an image + text chat completion from a Qwen VL model.

        Args:
            image_base64: base64-encoded JPEG image data.
            prompt: text prompt sent along with the image. Falls back to
                ``_DEFAULT_PROMPT`` when empty. (Bug fix: previously this
                parameter was silently ignored in favor of a hard-coded text.)
            model: model name on the SiliconFlow endpoint.
            detail: image detail level ('low' or 'high').

        Yields:
            str: incremental text fragments of the streamed response. On
            failure a single ``"Error: ..."`` fragment is yielded instead.
        """
        try:
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{image_base64}",
                                "detail": detail
                            }
                        },
                        {
                            "type": "text",
                            # Bug fix: actually use the caller's prompt; the
                            # original ignored it and always sent the default.
                            "text": prompt or self._DEFAULT_PROMPT
                        }
                    ]
                }
            ]

            # The OpenAI SDK call is blocking; run it in a worker thread so
            # the event loop stays responsive while the request is opened.
            response = await asyncio.to_thread(
                self.client.chat.completions.create,
                model=model,
                messages=messages,
                stream=True
            )

            # Bug fix: the returned stream is a *synchronous* iterator.
            # Iterating it directly (`for chunk in response:`) would block the
            # event loop on every network read, so each next() is also pushed
            # to a worker thread; a sentinel marks stream exhaustion.
            stream_iter = iter(response)
            sentinel = object()
            while True:
                chunk = await asyncio.to_thread(next, stream_iter, sentinel)
                if chunk is sentinel:
                    break
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except Exception as e:
            # Lazy %-formatting for logging; message text unchanged.
            logger.error("流式对话失败: %s", e, exc_info=True)
            yield f"Error: {str(e)}"

    @staticmethod
    async def process_stream_response(generator: AsyncGenerator) -> StreamingResponse:
        """
        Wrap an async text-chunk generator as a FastAPI Server-Sent-Events
        response; each chunk becomes one ``data: {json}`` event.
        """

        async def stream_generator():
            async for chunk in generator:
                # Each SSE event carries the chunk under a 'content' key.
                yield f"data: {json.dumps({'content': chunk})}\n\n"

        return StreamingResponse(
            stream_generator(),
            media_type="text/event-stream"
        )
