import os
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
import sys
from typing import Optional

from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request, BackgroundTasks
from fastapi.responses import JSONResponse, FileResponse

# Ensure local imports work: make the repo root and the bundled "indextts"
# package importable no matter which working directory the server starts from.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
sys.path.append(os.path.join(current_dir, "indextts"))

from indextts.infer_v2 import IndexTTS2  # noqa: E402

app = FastAPI(title="IndexTTS Inference API", version="0.1.0")

# Model initialization
# Every setting is overridable via environment variables so deployments can
# point at different checkpoints/configs without code changes.
MODEL_DIR = os.environ.get("INDEXTTS_MODEL_DIR", os.path.join(current_dir, "checkpoints"))
CFG_PATH = os.environ.get("INDEXTTS_CFG_PATH", os.path.join(MODEL_DIR, "config.yaml"))
# Boolean flags: lowercase the env value so "True"/"TRUE"/"true" all enable.
USE_FP16 = os.environ.get("INDEXTTS_FP16", "false").lower() == "true"
USE_DEEPSPEED = os.environ.get("INDEXTTS_DEEPSPEED", "false").lower() == "true"
USE_CUDA_KERNEL = os.environ.get("INDEXTTS_CUDA_KERNEL", "false").lower() == "true"

# Pre-create the output directory used by /infer responses.
os.makedirs(os.path.join(current_dir, "outputs", "api"), exist_ok=True)

try:
    # Loading the model is expensive and can fail (missing checkpoints, CUDA
    # issues, bad config). NOTE(review): constructor kwargs assumed to match
    # IndexTTS2's signature — confirm against indextts/infer_v2.py.
    tts = IndexTTS2(
        model_dir=MODEL_DIR,
        cfg_path=CFG_PATH,
        use_fp16=USE_FP16,
        use_deepspeed=USE_DEEPSPEED,
        use_cuda_kernel=USE_CUDA_KERNEL,
    )
except Exception as e:
    # Delay error to first request so the app can still start and report nicely
    # (surfaced via /health and _ensure_model_ready).
    tts = None
    app.state.model_init_error = str(e)


@app.get("/health")
def health():
    if getattr(app.state, "model_init_error", None):
        return JSONResponse({"status": "degraded", "error": app.state.model_init_error})
    return {"status": "ok"}


def _ensure_model_ready():
    """Raise 503 Service Unavailable if the TTS model is not usable.

    Initialization errors are captured at import time (module top) so the app
    can boot and report the failure here instead of crashing on startup.

    Raises:
        HTTPException: 503 with the stored init error, or a generic
            "not ready" message if ``tts`` is unset for any other reason.
    """
    init_error = getattr(app.state, "model_init_error", None)
    if init_error:
        # 503 (not 500): the service is temporarily unable to handle the
        # request — the correct status for a dependency that failed to load.
        raise HTTPException(status_code=503, detail=f"Model failed to initialize: {init_error}")
    # `tts` is always bound at module scope (set to None on init failure),
    # so the old `"tts" not in globals()` guard was redundant.
    if tts is None:
        raise HTTPException(status_code=503, detail="Model is not ready")


@app.post("/infer", response_class=FileResponse)
async def infer(
    background_tasks: BackgroundTasks,
    request: Request,
    text: Optional[str] = Form(default=None),
    prompt_audio: Optional[UploadFile] = File(default=None),
    output_name: Optional[str] = Form(default=None),
):
    """
    Perform TTS inference.

    Supports two usage patterns:
    - multipart/form-data with fields: text (str), prompt_audio (file)
    - application/json with fields: text (str), prompt_path (str - path to existing wav)
    """
    _ensure_model_ready()

    # Decide input mode by content type
    content_type = request.headers.get("content-type", "")
    spk_audio_path: Optional[str] = None

    if content_type.startswith("application/json"):
        body = await request.json()
        text = body.get("text")
        spk_audio_path = body.get("prompt_path")
        if not spk_audio_path or not os.path.exists(spk_audio_path):
            raise HTTPException(status_code=400, detail="prompt_path is required and must exist")
    else:
        if prompt_audio is None:
            raise HTTPException(status_code=400, detail="prompt_audio file is required in multipart/form-data")
        if text is None:
            raise HTTPException(status_code=400, detail="text is required")
        # Save uploaded file to a temp path
        upload_dir = os.path.join(current_dir, "uploads")
        os.makedirs(upload_dir, exist_ok=True)
        spk_audio_path = os.path.join(upload_dir, prompt_audio.filename)
        with open(spk_audio_path, "wb") as f:
            f.write(await prompt_audio.read())

    if not text or not isinstance(text, str) or len(text.strip()) == 0:
        raise HTTPException(status_code=400, detail="text must be a non-empty string")

    # Prepare output
    out_dir = os.path.join(current_dir, "outputs", "api")
    os.makedirs(out_dir, exist_ok=True)
    filename = (output_name or "tts_output") + ".wav"
    output_path = os.path.join(out_dir, filename)

    try:
        tts.infer(
            spk_audio_prompt=spk_audio_path,
            text=text,
            output_path=output_path,
            verbose=False,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"inference failed: {e}")

    # Optionally cleanup uploaded file after response is sent
    if content_type.startswith("multipart/") and prompt_audio is not None:
        def _cleanup(path: str):
            try:
                if os.path.exists(path):
                    os.remove(path)
            except Exception:
                pass
        background_tasks.add_task(_cleanup, spk_audio_path)

    return FileResponse(output_path, media_type="audio/wav", filename=os.path.basename(output_path))


# Optional: local dev entrypoint
if __name__ == "__main__":
    import uvicorn

    # Host/port are env-configurable; reload stays off for this entrypoint.
    uvicorn.run(
        "api:app",
        host=os.environ.get("HOST", "0.0.0.0"),
        port=int(os.environ.get("PORT", "8000")),
        reload=False,
    )
