import os
import json
import base64
from io import BytesIO
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Dict
from uuid import uuid4

import requests
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from PIL import Image, ImageFilter, ImageEnhance


# -------------------------------------------------
# Simple .env loader (no external dependency)
# -------------------------------------------------
def load_dotenv_simple(path: str = ".env") -> None:
    """Load KEY=VALUE pairs from a dotenv-style file into os.environ.

    Lines that are blank, start with '#', or lack an '=' are skipped.
    Surrounding double then single quotes around the value are stripped.
    Existing environment variables are never overwritten. All errors are
    swallowed (best-effort loading, no external dependency).
    """
    env_file = Path(path)
    if not env_file.exists():
        return
    try:
        for raw in env_file.read_text(encoding="utf-8").splitlines():
            entry = raw.strip()
            if not entry or entry.startswith("#") or "=" not in entry:
                continue
            key, _, value = entry.partition("=")
            key = key.strip()
            value = value.strip().strip('"').strip("'")
            # Do not override variables already present in the environment.
            if key and os.environ.get(key) is None:
                os.environ[key] = value
    except Exception:
        pass


# Populate os.environ from a local ".env" file at import time (best-effort,
# never overrides variables that are already set).
load_dotenv_simple()


# -------------------------------------------------
# App setup
# -------------------------------------------------
# FastAPI application instance and filesystem layout for generated assets.
app = FastAPI(title="AI Image Generator", version="0.1.0")

static_dir = Path("static")
# Generated images are written under static/generated so they are directly
# servable through the /static mount below.
generated_dir = static_dir / "generated"
generated_dir.mkdir(parents=True, exist_ok=True)

app.mount("/static", StaticFiles(directory=str(static_dir)), name="static")
templates = Jinja2Templates(directory="templates")
# In-memory store for Post/Redirect/Get results to avoid resubmission on refresh
app.state.results_store = {}


# -------------------------------------------------
# Helpers
# -------------------------------------------------
def _use_proxy() -> bool:
    # treat any non-empty truthy value as enabled ("1", "true", etc.)
    v = os.getenv("USE_PROXY", "").strip().lower()
    return v in {"1", "true", "yes", "on"}


def _proxies() -> Optional[Dict[str, str]]:
    """Return a requests-style proxies dict when USE_PROXY is enabled, else None.

    The proxy host comes from LOCAL_PROXY (default "127.0.0.1:7890") and the
    same http:// URL is used for both http and https traffic.
    """
    if not _use_proxy():
        return None
    host = os.getenv("LOCAL_PROXY", "127.0.0.1:7890")
    proxy_url = f"http://{host}"
    return {"http": proxy_url, "https": proxy_url}


def _safe_stem(text: str, max_len: int = 40) -> str:
    s = (text or '').strip().replace('\n', ' ').replace('\r', ' ')
    for ch in ['\\\n', '\\', '/', ':', '*', '?', '"', '<', '>', '|', ',']:
        s = s.replace(ch, ' ')
    s = ' '.join(s.split())
    s = s.replace(' ', '_')
    if len(s) > max_len:
        s = s[:max_len]
    return s or "img"


# ---------------- Doubao URL generator ----------------
# Doubao model id (overridable via the DOUBAO_MODEL env var) and the AIHubMix
# predictions endpoint derived from it once at import time.
DOUBAO_MODEL = os.getenv("DOUBAO_MODEL", "doubao-seedream-4-0-250828")
DOUBAO_API = f"https://aihubmix.com/v1/models/doubao/{DOUBAO_MODEL}/predictions"


def generate_with_doubao(
    prompt: str,
    api_key: str,
    ratio: str = "9:16",
    resolution: str = "1080x1920",
    send_ratio_params: bool = True,
    temperature: Optional[float] = None,
    model_override: Optional[str] = None,
) -> Optional[str]:
    """Return image URL or None. Prefer 9:16 portrait by default.

    Strategy:
    1) Try with explicit ratio/width/height params
    2) On failure, fallback to size-only and enforce ratio in prompt text

    Args:
        prompt: Text description of the desired image.
        api_key: AIHubMix Bearer token.
        ratio: Aspect-ratio hint, e.g. "9:16" or "16:9".
        resolution: "WxH" dimensions or a size token like "2k"/"4K".
        send_ratio_params: When True, attempt 1 sends explicit ratio/size fields.
        temperature: Optional sampling temperature, clamped to [0.0, 2.0].
        model_override: Alternate Doubao model id for the predictions endpoint.

    Returns:
        The first image URL from the API output, or None on failure.
    """
    ratio = (ratio or "9:16").strip()
    # NOTE(review): this falsy-resolution fallback is landscape 1920x1080 while
    # the parameter default is portrait 1080x1920 — confirm which is intended.
    resolution = (resolution or "1920x1080").lower().strip()
    w = h = None
    size_token: Optional[str] = None
    if "x" in resolution:
        try:
            w_str, h_str = resolution.split("x", 1)
            w = int(w_str)
            h = int(h_str)
        except Exception:
            w = h = None
    else:
        # support tokens like "2k", "4k"
        r = resolution.replace(" ", "").upper()
        if r in {"1K", "2K", "3K", "4K"}:
            size_token = r

    # derive size token from dimensions if not explicitly provided
    if size_token is None and w and h:
        long_side = max(w, h)
        try:
            if long_side >= 3500:
                size_token = "4K"
            elif long_side >= 2600:
                size_token = "3K"
            elif long_side >= 1700:
                size_token = "2K"
            else:
                size_token = "1K"
        except Exception:
            size_token = None

    # Orient wording by ratio to reduce ambiguity
    orient = "纵向构图" if ratio.replace(" ", "") in {"9:16", "9/16"} else ("宽画幅构图" if ratio.replace(" ", "") in {"16:9", "16/9"} else "构图")
    base_prompt = f"严格按描述生成{ratio}{orient}的高清图片，不要出现水印：" + prompt.strip()

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }

    def _call(payload: dict) -> Optional[str]:
        # POST the payload to the predictions endpoint (honoring the optional
        # model override) and extract the first output URL. HTTP errors raise
        # so the caller can fall through to the next attempt.
        s = requests.Session()
        s.trust_env = False
        proxies = _proxies()
        if proxies:
            s.proxies = proxies
        api_url = (
            f"https://aihubmix.com/v1/models/doubao/{model_override}/predictions"
            if model_override
            else DOUBAO_API
        )
        resp = s.post(api_url, headers=headers, json=payload, timeout=120)
        # If client or server error, raise to allow fallback
        resp.raise_for_status()
        data = resp.json()
        output = data.get("output") or []
        if isinstance(output, list) and output:
            url = (output[0] or {}).get("url")
            if isinstance(url, str) and url:
                return url
        return None

    # Attempt 1: with ratio and dimensions if requested
    if send_ratio_params:
        inp = {
            "prompt": base_prompt,
            "sequential_image_generation": "disabled",
            "stream": False,
            "response_format": "url",
            "watermark": False,
            # best-effort hints understood by many backends
            "aspect_ratio": ratio,
            "image_aspect_ratio": ratio,
        }
        if temperature is not None:
            try:
                t = float(temperature)
                inp["temperature"] = max(0.0, min(2.0, t))
            except Exception:
                pass
        # Prefer size token for SeedDream override; some backends ignore width/height
        if model_override and "seedream" in model_override:
            if size_token:
                inp["size"] = size_token
        else:
            if w and h:
                inp.update({"width": w, "height": h})
            if size_token:
                inp["size"] = size_token
        payload = {"input": inp}
        try:
            result = _call(payload)
            if result:
                return result
        except Exception:
            # fall through to attempt 2
            pass

    # Attempt 2: size-only with ratio enforced in prompt
    inp2 = {
        "prompt": base_prompt,
        "sequential_image_generation": "disabled",
        "stream": False,
        "response_format": "url",
        "watermark": False,
        # keep trying to hint ratio
        "aspect_ratio": ratio,
        "image_aspect_ratio": ratio,
    }
    inp2["size"] = size_token or "2K"
    payload2 = {"input": inp2}
    if temperature is not None:
        try:
            t = float(temperature)
            payload2["input"]["temperature"] = max(0.0, min(2.0, t))
        except Exception:
            pass
    try:
        return _call(payload2)
    except Exception:
        return None


# ---------------- Gemini (save to static) ----------------
def generate_with_chat_image_to_static(prompt: str, api_key: str, model: str, temperature: float = 0.7) -> Optional[str]:
    """Call AIHUBMIX chat.completions with given model. Save image to /static/generated and return web path.

    Tries to decode base64 from multi_mod_content.inline_data; falls back to
    downloading image_url. Returns the saved file's web path, or None on any
    failure (missing openai package, API error, no image in the response).

    Args:
        prompt: User prompt describing the desired image.
        api_key: AIHubMix Bearer token.
        model: Chat-completions model name capable of image output.
        temperature: Sampling temperature, clamped to [0.0, 2.0].
    """
    try:
        from openai import OpenAI  # type: ignore
    except Exception:
        return None

    client = OpenAI(api_key=api_key, base_url="https://aihubmix.com/v1")

    # Build prompt leaning to 16:9 cinematic framing when user not specifying ratio in form.
    full_prompt = f"Generate a 16:9 cinematic, well-lit, stylistically coherent image based on: {prompt.strip()}"
    try:
        # clamp temperature and pass through
        try:
            t = float(temperature)
        except Exception:
            t = 0.7
        t = max(0.0, min(2.0, t))
        resp = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": full_prompt}],
            temperature=t,
            modalities=["text", "image"],
        )
        msg = resp.choices[0].message
        b64_data: Optional[str] = None
        url_data: Optional[str] = None
        if hasattr(msg, "multi_mod_content") and msg.multi_mod_content:
            for mod in msg.multi_mod_content:
                if isinstance(mod, dict):
                    # Prefer inline base64 payloads; stop at the first one found.
                    if mod.get("inline_data") and isinstance(mod["inline_data"], dict):
                        d = mod["inline_data"].get("data")
                        if isinstance(d, str) and d.strip():
                            b64_data = d.strip()
                            break
                    if mod.get("image_url") and isinstance(mod["image_url"], dict):
                        u = mod["image_url"].get("url")
                        if isinstance(u, str) and u.strip():
                            url_data = u.strip()
        # Save output
        ts = datetime.now().strftime("%Y%m%d%H%M%S")
        stem = _safe_stem(f"{model}_{prompt}")
        filename = f"chat_{stem}_{ts}.png"
        out_path = (generated_dir / filename)
        if b64_data:
            out_path.write_bytes(base64.b64decode(b64_data))
            # BUG FIX: previously returned a literal "(unknown)" placeholder
            # instead of the saved file's actual name.
            return f"/static/generated/{filename}"
        if url_data:
            web = _download_to_generated(url_data, f"chat_{stem}")
            return web
    except Exception:
        return None
    return None


def generate_with_imagen_to_static(prompt: str, api_key: str, aspect_ratio: str = "16:9", number_of_images: int = 1) -> Optional[str]:
    """Call AIHubMix Imagen 4.0 predictions REST API and save first image.

    Uses the official endpoint: https://aihubmix.com/v1/models/imagen-4.0/predictions
    Minimal input is prompt + numberOfImages. We also hint aspect ratio in prompt text.
    Returns the web path of the downloaded image, or None on failure.

    Args:
        prompt: Text description of the desired image.
        api_key: AIHubMix Bearer token.
        aspect_ratio: Ratio hint ("16:9" or "9:16") appended to the prompt text.
        number_of_images: Requested image count (only the first is saved).
    """
    try:
        s = requests.Session()
        # Ignore system proxy env vars; apply the local proxy only when enabled.
        s.trust_env = False
        proxies = _proxies()
        if proxies:
            s.proxies = proxies

        # Build headers and payload
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }

        # Lightly hint the aspect ratio to improve framing
        ar = (aspect_ratio or "16:9").strip()
        orient_hint = "16:9 cinematic" if ar == "16:9" else ("9:16 portrait" if ar == "9:16" else ar)
        full_prompt = f"{prompt.strip()}\nAspect ratio hint: {orient_hint}"

        payload = {
            "input": {
                "prompt": full_prompt,
                "numberOfImages": max(1, int(number_of_images)),
            }
        }

        url = "https://aihubmix.com/v1/models/imagen-4.0/predictions"
        resp = s.post(url, headers=headers, json=payload, timeout=120)
        resp.raise_for_status()
        data = resp.json()

        # Expecting output as a list with objects containing 'url'
        output = data.get("output") or []
        first_url: Optional[str] = None
        if isinstance(output, list) and output:
            # Handle both dict entries with 'url' and direct string URLs
            first = output[0]
            if isinstance(first, dict):
                first_url = first.get("url") if isinstance(first.get("url"), str) else None
            elif isinstance(first, str):
                first_url = first

        if not first_url:
            return None

        stem = _safe_stem(f"imagen4_{prompt}")
        web = _download_to_generated(first_url, f"imagen4_{stem}")
        return web
    except Exception:
        return None


def generate_with_qwen_to_static(prompt: str, api_key: str, aspect_ratio: str = "16:9", number_of_images: int = 1) -> Optional[str]:
    """Use google-genai client via AIHUBMIX gateway to call Qwen Image and save first image.

    Behaves like Imagen adapter but with model='qwen-image'. Decodes the first
    generated image (raw bytes or a base64 string), validates it with PIL, and
    saves it under /static/generated.

    Returns:
        Web path of the saved image, or None on any failure (missing SDK,
        API error, undecodable or too-small payload, save failure).
    """
    try:
        from google import genai  # type: ignore
        from google.genai import types  # type: ignore
    except Exception:
        return None

    try:
        client = genai.Client(
            api_key=api_key,
            http_options={"base_url": "https://aihubmix.com/gemini"},
        )
        cfg = types.GenerateImagesConfig(
            number_of_images=max(1, int(number_of_images)),
            aspect_ratio=str(aspect_ratio or "16:9"),
        )
        resp = client.models.generate_images(
            model="qwen-image",
            prompt=prompt,
            config=cfg,
        )
        if resp and hasattr(resp, "generated_images") and resp.generated_images:
            gi = resp.generated_images[0]
            image_bytes = None
            try:
                image_bytes = gi.image.image_bytes  # type: ignore[attr-defined]
            except Exception:
                image_bytes = None
            # Some gateways return base64 text instead of raw bytes.
            if isinstance(image_bytes, str):
                try:
                    image_bytes = base64.b64decode(image_bytes)
                except Exception:
                    image_bytes = None
            if isinstance(image_bytes, (bytes, bytearray, memoryview)) and len(image_bytes) > 100:
                try:
                    buf = bytes(image_bytes) if isinstance(image_bytes, memoryview) else image_bytes
                    img = Image.open(BytesIO(buf))
                    img.load()
                except Exception:
                    return None
                ts = datetime.now().strftime("%Y%m%d%H%M%S")
                stem = _safe_stem(f"qwen_{prompt}")
                fmt = (img.format or "PNG").upper()
                ext = ".jpg" if fmt in ("JPEG", "JPG") else ".png"
                filename = f"qwen_{stem}_{ts}{ext}"
                out_path = generated_dir / filename
                try:
                    if ext == ".jpg":
                        img.convert("RGB").save(out_path, quality=95)
                    else:
                        img.save(out_path)
                except Exception:
                    return None
                # BUG FIX: previously returned a literal "(unknown)" placeholder
                # path instead of the saved file's actual name.
                return f"/static/generated/{filename}"
    except Exception:
        return None
    return None


def generate_with_imagen_genai_to_static(
    prompt: str,
    api_key: str,
    model: str = "imagen-4.0-ultra-generate-001",
    aspect_ratio: str = "16:9",
    number_of_images: int = 1,
) -> Optional[str]:
    """Use google-genai client via AIHUBMIX gateway to call Imagen (Ultra) and save first image.

    Compatible with models like 'imagen-4.0-ultra-generate-001'. Decodes the
    first generated image (raw bytes or a base64 string), validates it with
    PIL, and saves it under /static/generated.

    Returns:
        Web path of the saved image, or None on any failure.
    """
    try:
        from google import genai  # type: ignore
        from google.genai import types  # type: ignore
    except Exception:
        return None

    try:
        client = genai.Client(api_key=api_key, http_options={"base_url": "https://aihubmix.com/gemini"})
        cfg = types.GenerateImagesConfig(
            number_of_images=max(1, int(number_of_images)),
            aspect_ratio=str(aspect_ratio or "16:9"),
        )
        resp = client.models.generate_images(model=model, prompt=prompt, config=cfg)
        if resp and hasattr(resp, "generated_images") and resp.generated_images:
            gi = resp.generated_images[0]
            # Try to read bytes; if string, treat as base64
            image_bytes = None
            try:
                image_bytes = gi.image.image_bytes  # type: ignore[attr-defined]
            except Exception:
                image_bytes = None
            if isinstance(image_bytes, str):
                try:
                    image_bytes = base64.b64decode(image_bytes)
                except Exception:
                    image_bytes = None
            if isinstance(image_bytes, (bytes, bytearray)) and len(image_bytes) > 100:
                try:
                    img = Image.open(BytesIO(image_bytes))
                    img.load()
                except Exception:
                    return None
                ts = datetime.now().strftime("%Y%m%d%H%M%S")
                stem = _safe_stem(f"{model}_{prompt}")
                fmt = (img.format or "PNG").upper()
                ext = ".jpg" if fmt in ("JPEG", "JPG") else ".png"
                filename = f"{model.replace('.', '_').replace('-', '_')}_{stem}_{ts}{ext}"
                out_path = generated_dir / filename
                try:
                    if ext == ".jpg":
                        img.convert("RGB").save(out_path, quality=95)
                    else:
                        img.save(out_path)
                except Exception:
                    return None
                # BUG FIX: previously returned a literal "(unknown)" placeholder
                # path instead of the saved file's actual name.
                return f"/static/generated/{filename}"
    except Exception:
        return None
    return None

def generate_with_imagen_genai_multi_to_static(
    prompt: str,
    api_key: str,
    model: str = "imagen-4.0-ultra-generate-001",
    aspect_ratio: str = "1:1",
    number_of_images: int = 1,
) -> List[str]:
    """Like generate_with_imagen_genai_to_static but returns a list of saved web paths.

    Single API call with number_of_images and loops over generated_images to
    save each. Entries that cannot be decoded or saved are skipped.

    Returns:
        Web paths of the successfully saved images; [] on total failure.
    """
    try:
        from google import genai  # type: ignore
        from google.genai import types  # type: ignore
    except Exception:
        return []

    try:
        client = genai.Client(api_key=api_key, http_options={"base_url": "https://aihubmix.com/gemini"})
        cfg = types.GenerateImagesConfig(
            number_of_images=max(1, int(number_of_images)),
            aspect_ratio=str(aspect_ratio or "1:1"),
        )
        resp = client.models.generate_images(model=model, prompt=prompt, config=cfg)
        web_paths: List[str] = []
        if resp and hasattr(resp, "generated_images") and resp.generated_images:
            ts = datetime.now().strftime("%Y%m%d%H%M%S")
            stem = _safe_stem(f"{model}_{prompt}")
            for idx, gi in enumerate(resp.generated_images):
                image_bytes = None
                try:
                    image_bytes = gi.image.image_bytes  # type: ignore[attr-defined]
                except Exception:
                    image_bytes = None
                # Some gateways return base64 text instead of raw bytes.
                if isinstance(image_bytes, str):
                    try:
                        image_bytes = base64.b64decode(image_bytes)
                    except Exception:
                        image_bytes = None
                if not isinstance(image_bytes, (bytes, bytearray, memoryview)) or len(image_bytes) < 100:
                    continue
                try:
                    buf = bytes(image_bytes) if isinstance(image_bytes, memoryview) else image_bytes
                    img = Image.open(BytesIO(buf))
                    img.load()
                except Exception:
                    continue
                fmt = (img.format or "PNG").upper()
                ext = ".jpg" if fmt in ("JPEG", "JPG") else ".png"
                filename = f"{model.replace('.', '_').replace('-', '_')}_{stem}_{ts}_{idx+1}{ext}"
                out_path = generated_dir / filename
                try:
                    if ext == ".jpg":
                        img.convert("RGB").save(out_path, quality=95)
                    else:
                        img.save(out_path)
                    # BUG FIX: previously appended a literal "(unknown)"
                    # placeholder path instead of the saved file's name.
                    web_paths.append(f"/static/generated/{filename}")
                except Exception:
                    continue
        return web_paths
    except Exception:
        return []

def generate_with_ark_doubao_to_static(
    prompt: str,
    ratio: str = "16:9",
    resolution: str = "2K",
    number_of_images: int = 1,
    model: str = "doubao-seedream-4-0-250828",
) -> Optional[str]:
    """Use Volcengine Ark SDK to call Doubao seedream via Ark and save first image URL.

    Requires environment variable ARK_API_KEY.
    - ratio used as textual hint in prompt (Ark images.generate does not expose aspect_ratio directly in sample)
    - resolution maps to Ark size, e.g., "2K" | "4K" | "1024x1024" if supported

    Returns:
        Web path of the downloaded first image, or None on failure (missing
        API key, SDK not installed, API error, or empty result).
    """
    # Prefer env var 'HUOSHAN' per user requirement; fallback to 'ARK_API_KEY'
    api_key = os.getenv("HUOSHAN") or os.getenv("ARK_API_KEY")
    if not api_key:
        return None
    try:
        from volcenginesdkarkruntime import Ark  # type: ignore
        from volcenginesdkarkruntime.types.images.images import (
            SequentialImageGenerationOptions,  # type: ignore
        )
    except Exception:
        # Optional SDK not installed; treat the provider as unavailable.
        return None

    try:
        client = Ark(base_url="https://ark.cn-beijing.volces.com/api/v3", api_key=api_key)
        # augment prompt with ratio to enforce framing
        r = (ratio or "16:9").strip()
        orient = "宽画幅" if r.replace(" ", "") in {"16:9", "16/9"} else ("竖向" if r.replace(" ", "") in {"9:16", "9/16"} else "构图")
        final_prompt = f"请生成{r}{orient}的高清图片：" + prompt.strip()

        resp = client.images.generate(
            model=model,
            prompt=final_prompt,
            size=resolution or "2K",
            sequential_image_generation="disabled",
            sequential_image_generation_options=SequentialImageGenerationOptions(max_images=max(1, int(number_of_images))),
            response_format="url",
            watermark=False,
        )
        # resp.data is an iterable of image entries with .url
        urls: list[str] = []
        try:
            for it in (resp.data or []):  # type: ignore[attr-defined]
                u = getattr(it, "url", None)
                if isinstance(u, str) and u:
                    urls.append(u)
        except Exception:
            pass
        if not urls:
            return None
        # download first
        web = _download_to_generated(urls[0], f"ark_doubao_{_safe_stem(prompt)}")
        return web
    except Exception:
        return None


# ---------------- Banana nano background expand ----------------
def _read_image_bytes(url_or_path: str) -> Optional[bytes]:
    try:
        if url_or_path.startswith("http://") or url_or_path.startswith("https://"):
            s = requests.Session()
            s.trust_env = False
            proxies = _proxies()
            if proxies:
                s.proxies = proxies
            r = s.get(url_or_path, timeout=60)
            r.raise_for_status()
            return r.content
        # local static path, e.g., /static/generated/xxx.png
        p = url_or_path
        if url_or_path.startswith("/"):
            p = "." + url_or_path
        path = Path(p)
        if path.is_file():
            return path.read_bytes()
        return None
    except Exception:
        return None


def expand_background_with_banana_nano(prompt: str, image_bytes: bytes, api_key: str) -> Optional[str]:
    """Expand an image's background via banana-nano, falling back to gemini.

    Sends the image (as a base64 data URL) plus an expansion instruction to a
    multimodal chat model, and returns the base64-encoded result image taken
    from multi_mod_content.inline_data, or None on failure.

    Args:
        prompt: Original generation prompt, reused to keep style consistent.
        image_bytes: Raw bytes of the source image.
        api_key: AIHubMix Bearer token.
    """
    try:
        from openai import OpenAI  # type: ignore
    except Exception:
        return None

    model = os.getenv("BANANA_NANO_MODEL", "banana-nano")
    client = OpenAI(api_key=api_key, base_url="https://aihubmix.com/v1")

    b64img = base64.b64encode(image_bytes).decode("utf-8")
    instruction = (
        "请基于同一提示词进行背景扩展：" + prompt.strip() +
        "。要求：保持主体完整清晰与风格一致，向四周扩展背景并生成高清竖版画面，避免水印、文字、Logo。"
    )

    def _call(m: str) -> Optional[str]:
        # Single attempt with model *m*; returns the base64 payload or None.
        try:
            resp = client.chat.completions.create(
                model=m,
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": instruction},
                            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64img}"}},
                        ],
                    }
                ],
                temperature=0.7,
                modalities=["text", "image"],
            )
            msg = resp.choices[0].message
            if hasattr(msg, "multi_mod_content") and msg.multi_mod_content:
                for mod in msg.multi_mod_content:
                    # NOTE(review): assumes each mod is a mapping; the chat
                    # adapter earlier in this file guards with
                    # isinstance(mod, dict) — confirm both shapes occur.
                    if "inline_data" in mod and mod["inline_data"] and mod["inline_data"].get("data"):
                        return mod["inline_data"]["data"].strip()
        except Exception:
            return None
        return None

    # try preferred model, then fallback to gemini
    b64_out = _call(model)
    if not b64_out:
        b64_out = _call("gemini-2.5-flash-image-preview")
    return b64_out


# ---------------- Workflow helpers (crop/upscale/enhance) ----------------
def _save_bytes_to_generated(data: bytes, stem: str, suffix: str = ".png") -> str:
    """Write *data* under static/generated with a timestamped filename.

    Args:
        data: Raw file bytes to persist.
        stem: Filename stem (should already be sanitized).
        suffix: File extension including the dot.

    Returns:
        The web path (/static/generated/<file>) of the saved file.
    """
    ts = datetime.now().strftime("%Y%m%d%H%M%S")
    filename = f"{stem}_{ts}{suffix}"
    path = generated_dir / filename
    path.write_bytes(data)
    # BUG FIX: previously returned a literal "(unknown)" placeholder instead
    # of the saved file's actual name.
    return f"/static/generated/{filename}"


def _download_to_generated(url: str, stem: str) -> Optional[str]:
    """Download *url* (or read a local path) and persist under static/generated.

    Returns the saved file's web path, or None when the bytes can't be read.
    """
    payload = _read_image_bytes(url)
    if payload:
        return _save_bytes_to_generated(payload, stem)
    return None


def _enforce_16_9_and_resize(abs_path: Path, target_wh=(1920, 1080)) -> Optional[Path]:
    """Center-crop the image at *abs_path* to 16:9 and resize to *target_wh*.

    Saves the result next to the input with a `_crop_WxH` suffix and returns
    its path, or None on any failure.
    """
    try:
        with Image.open(abs_path) as im:
            rgb = im.convert("RGB")
            width, height = rgb.size
            want = 16 / 9
            have = width / height
            if abs(have - want) > 1e-3:
                if have > want:
                    # Wider than 16:9 -> trim left/right symmetrically.
                    crop_w = int(height * want)
                    left = (width - crop_w) // 2
                    rgb = rgb.crop((left, 0, left + crop_w, height))
                else:
                    # Taller than 16:9 -> trim top/bottom symmetrically.
                    crop_h = int(width / want)
                    top = (height - crop_h) // 2
                    rgb = rgb.crop((0, top, width, top + crop_h))
            resized = rgb.resize(target_wh, Image.Resampling.LANCZOS)
            dest = abs_path.parent / f"{abs_path.stem}_crop_{target_wh[0]}x{target_wh[1]}{abs_path.suffix}"
            resized.save(dest, quality=95)
            return dest
    except Exception:
        return None


def _parse_ratio(ratio: str) -> Optional[tuple[int, int]]:
    try:
        s = (ratio or "").strip().replace(" ", "").replace("/", ":")
        if ":" not in s:
            return None
        a, b = s.split(":", 1)
        w = int(a)
        h = int(b)
        if w <= 0 or h <= 0:
            return None
        return (w, h)
    except Exception:
        return None


def _enforce_ratio_and_resize(abs_path: Path, ratio: str, target_wh: tuple[int, int]) -> Optional[Path]:
    """Center-crop the image to *ratio* (e.g. "16:9") and resize to *target_wh*.

    Falls back to the dedicated 16:9 helper when *ratio* cannot be parsed.
    Output is written next to the input with a `_crop_WxH` suffix; returns
    the output path or None on failure.
    """
    parsed = _parse_ratio(ratio)
    if not parsed:
        # Unparseable ratio: reuse the 16:9 pipeline as a safe default.
        return _enforce_16_9_and_resize(abs_path, target_wh)
    want = parsed[0] / parsed[1]
    try:
        with Image.open(abs_path) as im:
            rgb = im.convert("RGB")
            width, height = rgb.size
            have = width / height
            if abs(have - want) > 1e-3:
                if have > want:
                    # Wider than target -> trim left/right symmetrically.
                    crop_w = int(height * want)
                    left = (width - crop_w) // 2
                    rgb = rgb.crop((left, 0, left + crop_w, height))
                else:
                    # Taller than target -> trim top/bottom symmetrically.
                    crop_h = int(width / want)
                    top = (height - crop_h) // 2
                    rgb = rgb.crop((0, top, width, top + crop_h))
            resized = rgb.resize(target_wh, Image.Resampling.LANCZOS)
            dest = abs_path.parent / f"{abs_path.stem}_crop_{target_wh[0]}x{target_wh[1]}{abs_path.suffix}"
            resized.save(dest, quality=95)
            return dest
    except Exception:
        return None


def _upscale_2x(abs_path: Path) -> Optional[Path]:
    """Double the image's dimensions with Lanczos resampling.

    Writes `<stem>_2x<suffix>` next to the input and returns its path,
    or None on failure.
    """
    try:
        with Image.open(abs_path) as im:
            rgb = im.convert("RGB")
            width, height = rgb.size
            doubled = rgb.resize((width * 2, height * 2), Image.Resampling.LANCZOS)
            dest = abs_path.parent / (abs_path.stem + "_2x" + abs_path.suffix)
            doubled.save(dest, quality=95)
            return dest
    except Exception:
        return None


def _sharpen_and_tune(abs_path: Path, sharpen=True, color=True, contrast=True) -> Optional[Path]:
    """Apply optional unsharp-mask, color and contrast boosts to an image.

    Each enhancement is applied only when its flag is True. Writes
    `<stem>_tune<suffix>` next to the input and returns its path, or None
    on failure.
    """
    try:
        with Image.open(abs_path) as im:
            result = im.convert("RGB")
            if sharpen:
                result = result.filter(ImageFilter.UnsharpMask(radius=1.1, percent=120, threshold=2))
            if color:
                result = ImageEnhance.Color(result).enhance(1.05)
            if contrast:
                result = ImageEnhance.Contrast(result).enhance(1.05)
            dest = abs_path.parent / (abs_path.stem + "_tune" + abs_path.suffix)
            result.save(dest, quality=96)
            return dest
    except Exception:
        return None


def _face_restore_if_available(abs_path: Path) -> Optional[Path]:
    """Optional face restoration using GFPGAN/CodeFormer if available. Returns output path or None.

    Note: Heavy dependencies are optional. If not installed, this step is skipped.
    """
    # Placeholder: try-import and skip if unavailable
    try:
        import cv2  # noqa: F401
    except Exception:
        return None
    # For lightweight environment, we skip actual face restore
    return None


def generate_with_ark_doubao_multi_to_static(
    prompt: str,
    ratio: str = "16:9",
    resolution: str = "2K",
    number_of_images: int = 1,
    model: str = "doubao-seedream-4-0-250828",
) -> List[str]:
    """Generate multiple images via Doubao Seedream Ark SDK and save all URLs.

    Returns list of web paths; empty list on failure.
    Requires env HUOSHAN or ARK_API_KEY.
    The ratio is injected into the prompt text as a framing hint; at most
    number_of_images URLs are downloaded.
    """
    api_key = os.getenv("HUOSHAN") or os.getenv("ARK_API_KEY")
    if not api_key:
        return []
    try:
        from volcenginesdkarkruntime import Ark  # type: ignore
        from volcenginesdkarkruntime.types.images.images import (
            SequentialImageGenerationOptions,  # type: ignore
        )
    except Exception:
        # Optional SDK not installed; treat the provider as unavailable.
        return []

    try:
        client = Ark(base_url="https://ark.cn-beijing.volces.com/api/v3", api_key=api_key)
        # Augment the prompt with the ratio wording to enforce framing.
        r = (ratio or "16:9").strip()
        orient = "宽画幅" if r.replace(" ", "") in {"16:9", "16/9"} else ("竖向" if r.replace(" ", "") in {"9:16", "9/16"} else "构图")
        final_prompt = f"请生成{r}{orient}的高清图片：" + prompt.strip()
        resp = client.images.generate(
            model=model,
            prompt=final_prompt,
            size=resolution or "2K",
            sequential_image_generation="disabled",
            sequential_image_generation_options=SequentialImageGenerationOptions(max_images=max(1, int(number_of_images))),
            response_format="url",
            watermark=False,
        )
        # resp.data is an iterable of image entries exposing a .url attribute.
        urls: list[str] = []
        try:
            for it in (resp.data or []):  # type: ignore[attr-defined]
                u = getattr(it, "url", None)
                if isinstance(u, str) and u:
                    urls.append(u)
        except Exception:
            pass
        web_paths: List[str] = []
        # Download at most number_of_images results; skip failed downloads.
        for i, u in enumerate(urls[:max(1, int(number_of_images))]):
            web = _download_to_generated(u, f"ark_doubao_{_safe_stem(prompt)}_{i+1}")
            if web:
                web_paths.append(web)
        return web_paths
    except Exception:
        return []


def refine_with_banana_nano(prompt: str, refine_note: str, image_bytes: bytes, api_key: str) -> Optional[str]:
    """Return base64 image using banana-nano (fallback gemini).

    Sends the source image plus a refine instruction to the aihubmix
    multimodal endpoint; returns the base64 payload of the refined image,
    or None when the SDK is unavailable or both model calls fail.
    """
    try:
        from openai import OpenAI  # type: ignore
    except Exception:
        # openai SDK not installed — caller treats None as "no refinement".
        return None

    client = OpenAI(api_key=api_key, base_url="https://aihubmix.com/v1")
    encoded = base64.b64encode(image_bytes).decode("utf-8")
    instruction = (
        "在保持主体与构图不变的前提下进行精修：" + (refine_note or "修正细节缺陷，提升材质与质感，避免水印/文字/Logo，整体风格一致。")
        + " 原始提示词：" + prompt.strip()
    )

    def _invoke(model_name: str) -> Optional[str]:
        # One attempt against a single model; any failure maps to None.
        try:
            resp = client.chat.completions.create(
                model=model_name,
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": instruction},
                            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
                        ],
                    }
                ],
                temperature=0.5,
                modalities=["text", "image"],
            )
            message = resp.choices[0].message
            for part in (getattr(message, "multi_mod_content", None) or []):
                if "inline_data" in part and part["inline_data"] and part["inline_data"].get("data"):
                    return part["inline_data"]["data"].strip()
        except Exception:
            return None
        return None

    # Primary model from env, then the gemini preview model as fallback.
    result = _invoke(os.getenv("BANANA_NANO_MODEL", "banana-nano"))
    if not result:
        result = _invoke("gemini-2.5-flash-image-preview")
    return result


def qa_inspect(image_bytes: bytes, api_key: str) -> Optional[str]:
    """Ask multimodal model to inspect quality issues; return plain text advice.

    Returns None when the openai SDK is missing, the call fails, or the
    model response is not plain text.
    """
    try:
        from openai import OpenAI  # type: ignore
    except Exception:
        return None
    client = OpenAI(api_key=api_key, base_url="https://aihubmix.com/v1")
    encoded = base64.b64encode(image_bytes).decode("utf-8")
    query = (
        "请检查图片质量问题并给出简短改进建议：聚焦 手部、面部、文字清晰度、几何畸变、曝光与对比度、边缘伪影、色彩偏移。"
        " 如果存在问题，请用要点列出并给出一条精炼的限制/指令以便下一轮生成修正。"
    )
    user_content = [
        {"type": "text", "text": query},
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
    ]
    try:
        resp = client.chat.completions.create(
            model="gemini-2.5-pro",
            messages=[{"role": "user", "content": user_content}],
            temperature=0.2,
        )
        content = resp.choices[0].message.content
        if isinstance(content, str):
            return content
        return None
    except Exception:
        return None


# -------------------------------------------------
# Routes
# -------------------------------------------------
@app.get("/", response_class=HTMLResponse)
async def index(request: Request):
    """Render the home page, restoring a one-shot PRG result when ?rid= is set.

    The stored payload is popped (consumed) so refreshing the page does not
    re-display stale results or resubmit the form.
    """
    payload = None
    rid = request.query_params.get("rid")
    if rid:
        try:
            payload = app.state.results_store.pop(rid, None)
        except Exception:
            payload = None

    ctx = {
        "request": request,
        "images": [],
        "error": None,
        "provider": "doubao",
        "prompt_value": "",
        "model_value": "gemini-2.5-flash-image-preview",
        "temperature_value": 0.7,
        "active_nav": "proxy",
    }
    if isinstance(payload, dict):
        ctx["images"] = payload.get("images", []) or []
        ctx["error"] = payload.get("error")
        ctx["provider"] = payload.get("provider") or ctx["provider"]
        ctx["prompt_value"] = payload.get("prompt") or ctx["prompt_value"]
        ctx["model_value"] = payload.get("model") or ctx["model_value"]
        stored_temp = payload.get("temperature")
        if stored_temp is not None:
            ctx["temperature_value"] = stored_temp

    return templates.TemplateResponse("index.html", ctx)


@app.get("/cover-image", response_class=HTMLResponse)
async def cover_image_index(request: Request):
    """Render the cover-image page, consuming a PRG result id if present."""
    payload = None
    rid = request.query_params.get("rid")
    if rid:
        try:
            payload = app.state.results_store.pop(rid, None)
        except Exception:
            payload = None

    ctx = {
        "request": request,
        "images": [],
        "error": None,
        "prompt_value": "",
        "ratio_value": "16:9",
        "resolution_value": "1920x1080",
        "active_nav": "proxy",
    }
    if isinstance(payload, dict):
        try:
            ctx["images"] = payload.get("images", []) or []
            ctx["error"] = payload.get("error")
            ctx["prompt_value"] = payload.get("prompt") or ctx["prompt_value"]
            ctx["ratio_value"] = payload.get("ratio") or ctx["ratio_value"]
            ctx["resolution_value"] = payload.get("resolution") or ctx["resolution_value"]
        except Exception:
            pass

    return templates.TemplateResponse("cover_image.html", ctx)


@app.get("/imagen-ultra", response_class=HTMLResponse)
async def imagen_ultra_index(request: Request):
    """Render the Imagen Ultra page, consuming a PRG result id if present."""
    payload = None
    rid = request.query_params.get("rid")
    if rid:
        try:
            payload = app.state.results_store.pop(rid, None)
        except Exception:
            payload = None

    ctx = {
        "request": request,
        "images": [],
        "error": None,
        "prompt_value": "",
        "ratio_value": "1:1",
        "n_value": 1,
        "active_nav": "proxy",
    }
    if isinstance(payload, dict):
        try:
            ctx["images"] = payload.get("images", []) or []
            ctx["error"] = payload.get("error")
            ctx["prompt_value"] = payload.get("prompt") or ctx["prompt_value"]
            ctx["ratio_value"] = payload.get("ratio") or ctx["ratio_value"]
            ctx["n_value"] = payload.get("n") or ctx["n_value"]
        except Exception:
            pass

    return templates.TemplateResponse("imagen_ultra.html", ctx)


@app.post("/imagen-ultra", response_class=HTMLResponse)
async def imagen_ultra_generate(
    request: Request,
    prompt: str = Form(...),
    ratio: str = Form("1:1"),
    n: int = Form(1),
):
    """Generate 1-6 images with Imagen 4.0 Ultra and PRG-redirect to the GET view.

    Validation failures re-render the page directly (no redirect); a
    successful request stashes its result under a one-shot rid and 303s.
    """
    prompt = (prompt or "").strip()
    if not prompt:
        return templates.TemplateResponse(
            "imagen_ultra.html",
            # "active_nav" added for consistency with the GET handler so the
            # navigation highlight is preserved on error responses.
            {"request": request, "images": [], "error": "请输入提示词（尽量英文以获得更佳效果）", "prompt_value": "", "ratio_value": ratio, "n_value": n, "active_nav": "proxy"},
            status_code=400,
        )

    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        return templates.TemplateResponse(
            "imagen_ultra.html",
            {"request": request, "images": [], "error": "缺少 AIHUBMIX_API_KEY，请在 .env 中配置", "prompt_value": prompt, "ratio_value": ratio, "n_value": n, "active_nav": "proxy"},
            status_code=500,
        )

    # Clamp ratio to the set Imagen accepts, and n to 1..6.
    allowed = {"1:1", "9:16", "16:9", "3:4", "4:3"}
    aspect = (ratio or "1:1").strip()
    if aspect not in allowed:
        aspect = "1:1"
    try:
        count = max(1, min(int(n), 6))
    except Exception:
        count = 1

    images = generate_with_imagen_genai_multi_to_static(
        prompt=prompt,
        api_key=api_key,
        model="imagen-4.0-ultra-generate-001",
        aspect_ratio=aspect,
        number_of_images=count,
    )
    error = None if images else "生成失败（Imagen 4.0 Ultra）"

    # PRG: store the result and redirect so a refresh does not resubmit.
    rid = uuid4().hex
    try:
        app.state.results_store[rid] = {
            "images": images,
            "error": error,
            "prompt": prompt,
            "ratio": aspect,
            "n": count,
        }
    except Exception:
        pass
    return RedirectResponse(url=f"/imagen-ultra?rid={rid}", status_code=303)


@app.get("/ark", response_class=HTMLResponse)
async def ark_index(request: Request):
    """Render the Ark page, consuming a one-shot PRG result id if present."""
    payload = None
    rid = request.query_params.get("rid")
    if rid:
        try:
            payload = app.state.results_store.pop(rid, None)
        except Exception:
            payload = None

    ctx = {
        "request": request,
        "images": [],
        "error": None,
        "prompt_value": "",
        "ark_model_value": "doubao-seedream-4-0-250828",
        "ratio_value": "16:9",
        "resolution_value": "2K",
        "n_value": 1,
        "active_nav": "ark",
    }
    if isinstance(payload, dict):
        try:
            ctx["images"] = payload.get("images", []) or []
            ctx["error"] = payload.get("error")
            ctx["prompt_value"] = payload.get("prompt") or ctx["prompt_value"]
            ctx["ark_model_value"] = payload.get("ark_model") or ctx["ark_model_value"]
            ctx["ratio_value"] = payload.get("ratio") or ctx["ratio_value"]
            ctx["resolution_value"] = payload.get("resolution") or ctx["resolution_value"]
            ctx["n_value"] = payload.get("n") or ctx["n_value"]
        except Exception:
            pass
    return templates.TemplateResponse("ark.html", ctx)


@app.post("/ark", response_class=HTMLResponse)
async def ark_generate(
    request: Request,
    prompt: str = Form(...),
    ark_model: str = Form("doubao-seedream-4-0-250828"),
    ratio: str = Form("16:9"),
    resolution: str = Form("2K"),
    n: int = Form(1),
):
    """Generate 1-6 images through Volcano Ark Doubao and PRG-redirect."""
    prompt = (prompt or "").strip()
    if not prompt:
        ctx = {"request": request, "images": [], "error": "请输入提示词", "prompt_value": "", "ark_model_value": ark_model, "ratio_value": ratio, "resolution_value": resolution, "n_value": n, "active_nav": "ark"}
        return templates.TemplateResponse("ark.html", ctx, status_code=400)

    # An Ark credential must exist before attempting generation.
    if not (os.getenv("HUOSHAN") or os.getenv("ARK_API_KEY")):
        ctx = {"request": request, "images": [], "error": "缺少 HUOSHAN 或 ARK_API_KEY", "prompt_value": prompt, "ark_model_value": ark_model, "ratio_value": ratio, "resolution_value": resolution, "n_value": n, "active_nav": "ark"}
        return templates.TemplateResponse("ark.html", ctx, status_code=500)

    # Clamp the requested count to 1..6.
    try:
        count = min(max(int(n), 1), 6)
    except Exception:
        count = 1

    images = generate_with_ark_doubao_multi_to_static(
        prompt=prompt,
        ratio=ratio or "16:9",
        resolution=resolution or "2K",
        number_of_images=count,
        model=(ark_model or "doubao-seedream-4-0-250828").strip(),
    )
    error = None if images else "生成失败（火山 Ark）"

    # PRG: stash the result and redirect so refresh does not regenerate.
    rid = uuid4().hex
    payload = {
        "images": images,
        "error": error,
        "prompt": prompt,
        "ark_model": ark_model,
        "ratio": ratio,
        "resolution": resolution,
        "n": count,
    }
    try:
        app.state.results_store[rid] = payload
    except Exception:
        pass
    return RedirectResponse(url=f"/ark?rid={rid}", status_code=303)


@app.post("/cover-image", response_class=HTMLResponse)
async def cover_image_generate(
    request: Request,
    prompt: str = Form(...),
    ratio: str = Form("16:9"),
    resolution: str = Form("1920x1080"),
):
    """Generate a cover image with SeedDream 4.0, enforce ratio/size, PRG-redirect.

    The resolution select fixes only the long side ("K" level); the exact
    WxH is derived from the chosen aspect ratio, requested from the model,
    then enforced locally by crop/resize after download.
    """
    prompt = (prompt or "").strip()
    if not prompt:
        return templates.TemplateResponse(
            "cover_image.html",
            # "active_nav" added so the nav highlight (set by the GET handler)
            # is also present on validation-error renders.
            {"request": request, "images": [], "error": "请输入提示词", "prompt_value": "", "ratio_value": ratio, "resolution_value": resolution, "active_nav": "proxy"},
            status_code=400,
        )

    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        return templates.TemplateResponse(
            "cover_image.html",
            {"request": request, "images": [], "error": "缺少 AIHUBMIX_API_KEY，请在 .env 中配置", "prompt_value": prompt, "ratio_value": ratio, "resolution_value": resolution, "active_nav": "proxy"},
            status_code=500,
        )

    # Force SeedDream 4.0 model
    model_slug = "doubao-seedream-4-0-250828"

    def _long_side_from_res(res: str) -> int:
        # Map the resolution select value to its long side in pixels;
        # unknown values fall back to 1920.
        r = (res or "1920x1080").lower()
        if r.startswith("3840x") or r.endswith("x3840"):
            return 3840
        if r.startswith("2880x") or r.endswith("x2880"):
            return 2880
        if r.startswith("1280x") or r.endswith("x1280"):
            return 1280
        return 1920

    def _target_wh_for_ratio(ratio_str: str, long_side: int) -> tuple[int, int]:
        # Derive (W, H): the long side becomes the width for landscape
        # ratios and the height for portrait ones.
        pair = _parse_ratio(ratio_str) or (16, 9)
        rw, rh = pair
        if rw >= rh:
            # landscape: width is long side
            W = long_side
            H = max(1, int(round(W * rh / rw)))
        else:
            # portrait: height is long side
            H = long_side
            W = max(1, int(round(H * rw / rh)))
        return (W, H)

    long_side = _long_side_from_res(resolution)
    tgt_w, tgt_h = _target_wh_for_ratio(ratio, long_side)

    images: List[str] = []
    error: Optional[str] = None
    # Ask the model for the exact WxH so it can honor the ratio/size hint.
    url = generate_with_doubao(
        prompt,
        api_key,
        ratio=ratio or "16:9",
        resolution=f"{tgt_w}x{tgt_h}",
        send_ratio_params=True,
        temperature=None,
        model_override=model_slug,
    )
    final_web: Optional[str] = None
    if url:
        # Download, then hard-enforce the aspect ratio and final pixel size.
        stem = f"cover_{_safe_stem(prompt)}"
        web = _download_to_generated(url, stem)
        if web:
            abs_p = _abs_from_web(web)
            cropped = _enforce_ratio_and_resize(abs_p, ratio, (tgt_w, tgt_h)) or abs_p
            final_web = f"/static/generated/{cropped.name}"
    if final_web:
        images.append(final_web)
    else:
        error = "生成失败（SeedDream 4.0）"

    # PRG: stash the result and redirect so refresh does not regenerate.
    rid = uuid4().hex
    try:
        app.state.results_store[rid] = {
            "images": images,
            "error": error,
            "prompt": prompt,
            "ratio": ratio,
            "resolution": resolution,
        }
    except Exception:
        pass
    return RedirectResponse(url=f"/cover-image?rid={rid}", status_code=303)

@app.post("/generate", response_class=HTMLResponse)
async def generate(
    request: Request,
    prompt: str = Form(...),
    provider: str = Form("doubao"),
    ratio: str = Form("16:9"),
    resolution: str = Form("1920x1080"),
    force_ratio: Optional[str] = Form("on"),
    model: str = Form("gemini-2.5-flash-image-preview"),
    temperature: float = Form(0.7),
):
    """Main generation endpoint: dispatch to provider/model, then PRG-redirect."""
    prompt = (prompt or "").strip()
    if not prompt:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [], "error": "请输入提示词", "provider": provider, "prompt_value": ""},
            status_code=400,
        )

    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [], "error": "缺少 AIHUBMIX_API_KEY，请在 .env 中配置", "provider": provider, "prompt_value": prompt},
            status_code=500,
        )

    images: List[str] = []
    error: Optional[str] = None

    if provider == "gemini":
        chosen = (model or "").strip()
        # Single ratio clamp shared by all aspect-aware models below.
        allowed = {"1:1", "9:16", "16:9", "3:4", "4:3"}
        r = (ratio or "16:9").strip()
        aspect = r if r in allowed else "16:9"
        if chosen == "imagen-4.0":
            url = generate_with_imagen_to_static(prompt, api_key, aspect_ratio=aspect, number_of_images=1)
        elif chosen == "imagen-4.0-ultra-generate-001":
            url = generate_with_imagen_genai_to_static(prompt, api_key, model=chosen, aspect_ratio=aspect, number_of_images=1)
        elif chosen == "qwen-image":
            url = generate_with_qwen_to_static(prompt, api_key, aspect_ratio=aspect, number_of_images=1)
        elif chosen == "doubao-seedream-4-0-250828-ark":
            # Ark takes a "K" level, not WxH; only true 4K inputs map to "4K".
            ark_size = "4K" if resolution and resolution.lower() in {"3840x2160", "2160x3840"} else "2K"
            url = generate_with_ark_doubao_to_static(prompt, ratio=ratio or "16:9", resolution=ark_size, number_of_images=1)
        else:
            url = generate_with_chat_image_to_static(prompt, api_key, chosen, temperature)
        if url:
            images.append(url)
        else:
            error = f"生成失败（模型：{model}）"
    else:
        send_hint = (force_ratio or "on") in ("on", "true", "1")
        url = generate_with_doubao(
            prompt,
            api_key,
            ratio=ratio or "9:16",
            resolution=resolution or "1080x1920",
            send_ratio_params=send_hint,
            temperature=temperature,
        )
        if url:
            images.append(url)
        else:
            error = "生成失败（Doubao）"

    # PRG: store the outcome and redirect so refresh does not resubmit.
    rid = uuid4().hex
    try:
        app.state.results_store[rid] = {
            "images": images,
            "error": error,
            "provider": provider,
            "prompt": prompt,
            "model": model,
            "temperature": temperature,
        }
    except Exception:
        pass
    return RedirectResponse(url=f"/?rid={rid}", status_code=303)


@app.post("/expand", response_class=HTMLResponse)
async def expand(request: Request, image_url: str = Form(...), prompt: str = Form("")):
    """Expand the background of an existing image via banana-nano and render it.

    Reads the source image, asks the model for an outpainted version, writes
    the PNG under static/generated, and re-renders the index page pointing at
    the new file. Each failure mode renders the page with a specific error.
    """
    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [], "error": "缺少 AIHUBMIX_API_KEY，请在 .env 中配置", "provider": "doubao", "prompt_value": prompt},
            status_code=500,
        )

    img_bytes = _read_image_bytes(image_url)
    if not img_bytes:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [], "error": "无法读取源图片", "provider": "doubao", "prompt_value": prompt},
            status_code=400,
        )

    b64 = expand_background_with_banana_nano(prompt, img_bytes, api_key)
    if not b64:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [image_url], "error": "背景扩展失败", "provider": "doubao", "prompt_value": prompt},
            status_code=500,
        )

    # save to static with a timestamped, prompt-derived filename
    ts = datetime.now().strftime("%Y%m%d%H%M%S")
    stem = _safe_stem(prompt)
    filename = f"nano_expand_{stem}_{ts}.png"
    out_path = generated_dir / filename
    try:
        out_path.write_bytes(base64.b64decode(b64))
    except Exception:
        return templates.TemplateResponse(
            "index.html",
            {"request": request, "images": [image_url], "error": "结果保存失败", "provider": "doubao", "prompt_value": prompt},
            status_code=500,
        )

    # BUG FIX: previously this was a literal "/static/generated/(unknown)",
    # so the success page always showed a broken image; it must reference
    # the file that was just written.
    web_path = f"/static/generated/{filename}"

    return templates.TemplateResponse(
        "index.html",
        {"request": request, "images": [web_path], "error": None, "provider": "doubao", "prompt_value": prompt},
        status_code=200,
    )


# ---------------- Workflow routes ----------------
@app.get("/workflow", response_class=HTMLResponse)
async def workflow_index(request: Request):
    """Render the workflow start page with its default settings."""
    ctx = {
        "request": request,
        "step": "start",
        "prompt": "",
        "candidates": [],
        "error": None,
        "base_size": "1024x576",
        "n": 4,
        "active_nav": "proxy",
    }
    return templates.TemplateResponse("workflow.html", ctx)


def _gen_candidates(prompt: str, n: int, base_size: str, api_key: str) -> List[str]:
    """Generate up to 8 Doubao 16:9 candidates and return their web paths."""
    collected: List[str] = []
    attempts = max(1, min(n, 8))
    for idx in range(attempts):
        url = generate_with_doubao(prompt, api_key, ratio="16:9", resolution=base_size, send_ratio_params=True)
        if not url:
            continue
        web = _download_to_generated(url, f"wf_cand_{_safe_stem(prompt)}_{idx + 1}")
        if web:
            collected.append(web)
    return collected


@app.post("/workflow/start", response_class=HTMLResponse)
async def workflow_start(request: Request, prompt: str = Form(...), n: int = Form(4), base_size: str = Form("1024x576")):
    """Kick off the workflow: generate candidates and render the selection step."""
    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        ctx = {"request": request, "step": "start", "prompt": prompt, "candidates": [], "error": "缺少 AIHUBMIX_API_KEY"}
        return templates.TemplateResponse("workflow.html", ctx, status_code=500)
    ctx = {
        "request": request,
        "step": "select",
        "prompt": prompt,
        "candidates": _gen_candidates(prompt, n, base_size, api_key),
        "error": None,
        "base_size": base_size,
        "n": n,
        "refine_note": "保留构图与主体不变，修正细节缺陷，提升材质与质感，避免水印与文字",
        "target_res": "1920x1080",
    }
    return templates.TemplateResponse("workflow.html", ctx)


def _abs_from_web(web_path: str) -> Path:
    if web_path.startswith("/static/"):
        return Path("." + web_path).resolve()
    return Path(web_path).resolve()


@app.post("/workflow/process", response_class=HTMLResponse)
async def workflow_process(
    request: Request,
    prompt: str = Form(...),
    # NOTE(review): mutable default — presumably safe because FastAPI treats
    # Form defaults as declarations, not shared state; confirm.
    selected: List[str] = Form([]),
    refine_note: str = Form(""),
    target_res: str = Form("1920x1080"),
    do_upscale: Optional[str] = Form("on"),
    do_sharpen: Optional[str] = Form("on"),
):
    """Refine up to two selected candidates and render the final workflow step.

    Per image the pipeline is: banana-nano refine -> 16:9 crop/resize to
    target_res -> optional 2x upscale -> optional face restore (gated by env
    USE_FACE_FIX) -> optional sharpen/tune -> multimodal QA note. If the
    refine step fails, the original bytes are saved and used instead.
    """
    api_key = os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        return templates.TemplateResponse("workflow.html", {"request": request, "step": "start", "prompt": prompt, "candidates": [], "error": "缺少 AIHUBMIX_API_KEY"}, status_code=500)

    if not selected:
        return templates.TemplateResponse("workflow.html", {"request": request, "step": "start", "prompt": prompt, "candidates": [], "error": "请至少选择一张候选图"}, status_code=400)

    final_items: List[Dict[str, str]] = []
    # Process at most two selections; unreadable files are silently skipped.
    for web in selected[:2]:
        abs_p = _abs_from_web(web)
        try:
            b = abs_p.read_bytes()
        except Exception:
            continue
        b64 = refine_with_banana_nano(prompt, refine_note, b, api_key)
        if not b64:
            # fallback: use original
            out_web = _save_bytes_to_generated(b, f"wf_refine_fail_{_safe_stem(prompt)}")
            final_items.append({"url": out_web, "qa": ""})
            continue
        # save refined
        refined_web = _save_bytes_to_generated(base64.b64decode(b64), f"wf_refined_{_safe_stem(prompt)}")
        # crop/resize — parse "WxH"; malformed input keeps the 1920x1080 default
        tr_w, tr_h = (1920, 1080)
        if "x" in target_res:
            try:
                tr_w, tr_h = [int(x) for x in target_res.split("x", 1)]
            except Exception:
                pass
        refined_abs = _abs_from_web(refined_web)
        cropped = _enforce_16_9_and_resize(refined_abs, (tr_w, tr_h)) or refined_abs
        # `cur` tracks the latest output path; each optional stage falls back
        # to the previous path when it returns a falsy result.
        cur = cropped
        # upscale 2x (optional)
        if (do_upscale or "on") in ("on", "true", "1"):
            cur = _upscale_2x(cur) or cur
        # optional face restore
        if os.getenv("USE_FACE_FIX", "0").strip() in ("1", "true", "on"):
            cur = _face_restore_if_available(cur) or cur
        # tune
        if (do_sharpen or "on") in ("on", "true", "1"):
            cur = _sharpen_and_tune(cur) or cur
        # to web path
        final_web = "/static/generated/" + cur.name
        # qa — best-effort; any failure leaves the note empty
        qa = ""
        try:
            qa = qa_inspect(Path("." + final_web).read_bytes(), api_key) or ""
        except Exception:
            pass
        final_items.append({"url": final_web, "qa": qa})

    return templates.TemplateResponse(
        "workflow.html",
        {"request": request, "step": "final", "prompt": prompt, "final_items": final_items, "error": None},
        status_code=200 if final_items else 500,
    )


@app.get("/health")
async def health():
    """Liveness probe: always reports the service as up."""
    return {"ok": True}


# ---------------- Gallery (generated/finished) ----------------
def _list_images_in(subdir: str) -> List[Dict[str, str]]:
    items: List[Dict[str, str]] = []
    root = static_dir / subdir
    try:
        files = []
        for p in root.glob("*"):
            if p.is_file() and p.suffix.lower() in {".png", ".jpg", ".jpeg", ".webp"}:
                try:
                    mtime = p.stat().st_mtime
                except Exception:
                    mtime = 0.0
                files.append((mtime, p))
        files.sort(key=lambda x: x[0], reverse=True)
        for _, p in files:
            items.append({
                "url": f"/static/{subdir}/{p.name}",
                "name": p.name,
            })
    except Exception:
        pass
    return items


def _list_transfer_files() -> List[Dict[str, str]]:
    """List compressed files under static/files for download.

    Returns newest-first dicts with url/name plus the byte size and a
    human-readable size string. Returns an empty list when the directory
    cannot be created.
    """
    items: List[Dict[str, str]] = []
    root = static_dir / "files"
    try:
        root.mkdir(parents=True, exist_ok=True)
    except Exception:
        return items
    allowed = {
        ".zip", ".rar", ".7z", ".tar", ".gz", ".bz2", ".xz",
        ".tar.gz", ".tar.bz2", ".tar.xz", ".tgz", ".tbz2", ".txz",
    }

    def _exts(p: Path) -> str:
        # Compound extension (e.g. ".tar.gz"); fall back to the last suffix.
        try:
            return "".join(p.suffixes).lower()
        except Exception:
            return p.suffix.lower()

    def _hsize(n: int) -> str:
        # Human-readable size, e.g. "1.5 MB". Defined once here instead of
        # being re-created inside the per-file loop on every iteration.
        units = ["B", "KB", "MB", "GB"]
        s = float(n)
        i = 0
        while s >= 1024 and i < len(units) - 1:
            s /= 1024.0
            i += 1
        return f"{s:.1f} {units[i]}"

    files: List[tuple[float, Path]] = []
    for p in root.glob("*"):
        if not p.is_file():
            continue
        if _exts(p) not in allowed:
            continue
        try:
            mtime = p.stat().st_mtime
        except Exception:
            mtime = 0.0
        files.append((mtime, p))
    files.sort(key=lambda x: x[0], reverse=True)
    for _, p in files:
        try:
            size = p.stat().st_size
        except Exception:
            size = 0
        items.append({
            "url": f"/static/files/{p.name}",
            "name": p.name,
            "size": size,
            "size_human": _hsize(size),
        })
    return items


@app.get("/images", response_class=HTMLResponse)
async def images(request: Request, tab: str = "generated"):
    """Gallery page showing either the generated or finished image set."""
    if tab not in {"generated", "finished"}:
        tab = "generated"
    other = "finished" if tab == "generated" else "generated"
    # local renamed from "images" to avoid shadowing this handler's own name
    gallery = _list_images_in(tab)
    return templates.TemplateResponse(
        "images.html",
        {
            "request": request,
            "images": gallery,
            "active_tab": tab,
            "other_tab": other,
            "active_count": len(gallery),
            "other_count": len(_list_images_in(other)),
            "active_nav": "images",
        },
    )


@app.get("/files", response_class=HTMLResponse)
async def files_page(request: Request):
    """Render the downloadable-archive list built from static/files."""
    transfer = _list_transfer_files()
    ctx = {
        "request": request,
        "files": transfer,
        "count": len(transfer),
        "active_nav": "files",
    }
    return templates.TemplateResponse("files.html", ctx)
