import time
import asyncio
import logging
from typing import Dict, Any, Optional

import cloudscraper
from fastapi import HTTPException
from fastapi.responses import JSONResponse

from app.core.config import settings
from app.providers.base_provider import BaseProvider
from app.utils.oss_uploader import OSSImageUploader

logger = logging.getLogger(__name__)

class VisualGPTProvider(BaseProvider):
    """Provider that proxies image-editing tasks to the visualgpt.io API.

    Workflow: upload the source image to OSS, submit a prediction task,
    poll the task status until it succeeds or fails, and return the result
    URLs in an OpenAI-images-style JSON response.
    """

    BASE_URL = "https://visualgpt.io/api/v1"

    def __init__(self):
        # cloudscraper session to get past Cloudflare on visualgpt.io.
        self.scraper = cloudscraper.create_scraper()
        # NOTE(review): headers are evaluated once here, so the uploader keeps
        # a snapshot of the cookie taken at construction time — if the cookie
        # in settings is rotated at runtime the uploader will not see it.
        self.uploader = OSSImageUploader(self.scraper, self.get_get_headers())

    def _get_base_headers(self) -> Dict[str, str]:
        """Build the headers shared by both GET and POST requests.

        Raises:
            ValueError: if VISUALGPT_COOKIE is not configured in .env.
        """
        if not settings.VISUALGPT_COOKIE:
            raise ValueError("VISUALGPT_COOKIE 未在 .env 文件中配置。")
        return {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Origin": "https://visualgpt.io",
            "Referer": "https://visualgpt.io/",
            "Cookie": settings.VISUALGPT_COOKIE,
        }

    def get_get_headers(self) -> Dict[str, str]:
        """Headers for GET requests (base headers only)."""
        return self._get_base_headers()

    def get_post_headers(self) -> Dict[str, str]:
        """Headers for POST requests (base headers plus JSON content type)."""
        headers = self._get_base_headers()
        headers["Content-Type"] = "application/json; charset=UTF-8"
        return headers

    async def process_image_task(
        self,
        image_bytes: bytes,
        image_filename: str,
        prompt: str,
        model: Optional[str],
        n: int,
        size: str,
        aspect_ratio: str  # aspect ratio forwarded verbatim to the upstream API
    ) -> JSONResponse:
        """Run the full edit pipeline: upload, submit, poll, wrap result.

        Args:
            image_bytes: Raw bytes of the source image.
            image_filename: Original filename (used by the OSS uploader).
            prompt: Edit instruction sent to the model.
            model: Model name; falls back to settings.DEFAULT_EDIT_MODEL.
            n: Number of images requested.
            size: Requested output size string.
            aspect_ratio: Requested aspect ratio string.

        Returns:
            JSONResponse shaped like the OpenAI images API:
            {"created": <epoch>, "data": [{"url": ...}, ...]}.

        Raises:
            HTTPException: 400 for an unknown model, 500 for any other
                failure in the pipeline (upstream HTTPExceptions are
                re-raised unchanged).
        """
        model = model or settings.DEFAULT_EDIT_MODEL
        if model not in settings.MODEL_CONFIG:
            raise HTTPException(status_code=400, detail=f"不支持的模型: {model}")

        logger.info(f"开始处理任务，模型: {model}, Prompt: '{prompt[:50]}...'")

        try:
            logger.info("正在上传图片至 OSS...")
            image_url = await self.uploader.upload_image(image_bytes, image_filename)
            logger.info(f"图片上传成功: {image_url}")

            session_id = await self._start_prediction(image_url, prompt, model, n, size, aspect_ratio)
            logger.info(f"任务提交成功，Session ID: {session_id}")
            logger.info("正在轮询任务状态，部分模型可能需要较长时间，请耐心等待...")

            result_urls = await self._poll_for_status(session_id)
            logger.info(f"任务完成，获取到 {len(result_urls)} 个结果。")

            response_data = {
                "created": int(time.time()),
                "data": [{"url": url} for url in result_urls]
            }
            return JSONResponse(content=response_data)

        except HTTPException:
            # Preserve the original status code/detail instead of
            # collapsing every HTTPException into a generic 500.
            raise
        except Exception as e:
            logger.error(f"处理图像任务时出错: {e}", exc_info=True)
            raise HTTPException(status_code=500, detail=str(e))

    async def _start_prediction(self, image_url: str, prompt: str, model: str, n: int, size: str, aspect_ratio: str) -> str:
        """Submit a prediction task upstream and return its session_id.

        Raises:
            Exception: if the upstream response code is not 100000 or the
                session_id is missing from the payload.
        """
        url = f"{self.BASE_URL}/prediction/handle"
        model_params = settings.MODEL_CONFIG[model]

        payload = {
            "image_urls": [image_url],
            "type": model_params["type"],
            "sub_type": model_params["sub_type"],
            "user_prompt": prompt,
            "max_images": n,
            "size": size,
            "aspect_ratio": aspect_ratio,
            "num": n  # the API also requires a "num" field
        }

        # cloudscraper is synchronous; run it in the default executor so the
        # event loop is not blocked.
        loop = asyncio.get_running_loop()
        response = await loop.run_in_executor(
            None, lambda: self.scraper.post(url, headers=self.get_post_headers(), json=payload, timeout=settings.API_REQUEST_TIMEOUT)
        )
        response.raise_for_status()
        data = response.json()

        # 100000 is the upstream success code — TODO confirm against API docs.
        if data.get("code") != 100000 or "data" not in data or "session_id" not in data["data"]:
            raise Exception(f"提交预测任务失败: {data.get('message', '未知错误')}")

        return data["data"]["session_id"]

    async def _poll_for_status(self, session_id: str) -> list[str]:
        """Poll the upstream task status until success, failure, or timeout.

        Returns:
            The list of result URLs for the first result entry.

        Raises:
            Exception: on upstream failure, missing result URLs, or when
                settings.POLLING_TIMEOUT elapses without a terminal status.
        """
        start_time = time.time()
        url = f"{self.BASE_URL}/prediction/get-status?session_id={session_id}"

        while time.time() - start_time < settings.POLLING_TIMEOUT:
            # Sleep first: the task was only just submitted, so an
            # immediate query would almost never find it finished.
            await asyncio.sleep(settings.POLLING_INTERVAL)

            loop = asyncio.get_running_loop()
            response = await loop.run_in_executor(
                None, lambda: self.scraper.get(url, headers=self.get_get_headers(), timeout=settings.API_REQUEST_TIMEOUT)
            )
            response.raise_for_status()
            data = response.json()

            if data.get("code") != 100000:
                # Transient upstream error: log and keep polling.
                logger.warning(f"轮询状态失败: {data.get('message')}")
                continue

            status_data = data.get("data", {})
            status = status_data.get("status")

            if status == "succeeded":
                results = status_data.get("results", [])
                if results and results[0].get("urls"):
                    return results[0]["urls"]
                else:
                    raise Exception("任务成功但未找到结果 URL。")
            elif status == "failed":
                # Guard against "results": [] — with a plain .get default the
                # key would exist and indexing [0] would raise IndexError.
                failed_results = status_data.get("results") or [{}]
                error_msg = failed_results[0].get("error", "未知错误")
                raise Exception(f"上游任务处理失败: {error_msg}")

            logger.debug(f"任务状态: {status}, Session ID: {session_id}")

        raise Exception("轮询任务状态超时。")

    async def get_models(self) -> JSONResponse:
        """Return the configured models as an OpenAI-style model list."""
        model_data = {
            "object": "list",
            "data": [
                {
                    "id": name,
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "lzA6",
                    "description": config["description"]
                }
                for name, config in settings.MODEL_CONFIG.items()
            ]
        }
        return JSONResponse(content=model_data)
