from fastapi import APIRouter
from pydantic import BaseModel
from typing import Optional, Dict, Any
from intelli_port.langgraph_module import build_main_graph
import time
from intelli_port.commons.observability import e2e_latency_ms
from intelli_port.model.model_inference import PaddleASRInferencer, PaddleOCRInferencer, PaddleTTSInferencer


router = APIRouter()
# Module-level singletons: the LangGraph pipeline and the Paddle inferencers
# are built once at import time and shared by every request handler.
# NOTE(review): all handlers pass thread_id="default", so graph state is
# shared across all clients — confirm this is intended.
graph = build_main_graph()
asr = PaddleASRInferencer()
ocr = PaddleOCRInferencer()
tts = PaddleTTSInferencer()


class TextRequest(BaseModel):
    """Request body for the text-driven endpoints (/intent/recognize, /service/resolve)."""

    text: str  # user utterance forwarded to the graph
    modality: Optional[str] = None  # input channel hint — presumably "voice"/"scan" like /perception/resolve; confirm
    scan_data: Optional[Dict[str, Any]] = None  # document-scan payload passed through to the graph
    confirm: Optional[bool] = None  # user confirmation flag forwarded in the payload
    session: Optional[Dict[str, Any]] = None  # per-session context; only the "lang" key is read in this module
    tts: Optional[bool] = None  # when truthy, /service/resolve also synthesizes a spoken reply

class PerceptionRequest(BaseModel):
    """Request body for /perception/resolve: ASR/OCR input paths plus options."""

    asr_path: Optional[str] = None  # path to a WAV file to transcribe via PaddleASRInferencer
    ocr_path: Optional[str] = None  # path to an image to scan; result is wrapped as an id_card scan_data
    confirm: Optional[bool] = None  # user confirmation flag forwarded in the payload
    tts: Optional[bool] = None  # when truthy, also synthesize the resolved reply to a WAV
    session: Optional[Dict[str, Any]] = None  # per-session context; only the "lang" key is read in this module


@router.post("/intent/recognize")
async def intent_recognize(req: TextRequest):
    """Run the main graph over the request payload and report the recognized intent.

    Returns the intent label, its confidence, and the full graph state.
    """
    payload = req.model_dump()
    started = time.time()
    result = graph.invoke(
        payload,
        config={"configurable": {"thread_id": "default"}},
    )
    # Record wall-clock latency in ms; clamp at zero in case of clock skew.
    elapsed_ms = max(0, (time.time() - started) * 1000)
    e2e_latency_ms.labels(path="/intent/recognize").observe(elapsed_ms)
    return {
        "intent": result.get("intent", ""),
        "confidence": result.get("confidence", 0),
        "state": result,
    }


@router.post("/service/resolve")
async def service_resolve(req: TextRequest):
    """Resolve a service for the given text; optionally synthesize a spoken reply.

    Runs the main graph on the request payload and returns the resolved
    ``service`` dict plus the full graph ``state``. When ``req.tts`` is truthy
    and a sentence can be derived from the service, the sentence is synthesized
    to a WAV and its path is returned under ``audio_path``.
    """
    payload = req.model_dump()
    t0 = time.time()
    # NOTE(review): graph.invoke and tts.synthesize_to_wav appear synchronous;
    # they block the event loop inside this async handler — confirm, and
    # consider run_in_executor if they are slow.
    state = graph.invoke(payload, config={"configurable": {"thread_id": "default"}})
    e2e_latency_ms.labels(path="/service/resolve").observe(max(0, (time.time() - t0) * 1000))
    resp = {"service": state.get("service", {}), "state": state}
    if req.tts:
        # The graph may yield "service": None; normalize once instead of the
        # scattered (s or {}) guards (the original crashed on the first
        # unguarded s.get when service was None).
        s = resp["service"] or {}
        stype = s.get("type")
        it = str(state.get("intent") or stype or "")
        lang = str((req.session or {}).get("lang") or "zh").lower()
        zh = lang == "zh"
        speak = None
        if stype == "gate_query":
            gate = s.get("gate")
            if gate:
                speak = f"您的登机口是 {gate}" if zh else f"Your gate is {gate}"
        elif stype == "faq":
            speak = s.get("answer")
        elif stype == "navigate":
            steps = s.get("steps") or []
            if steps:
                # Speak at most the first three steps.
                speak = "，".join(steps[:3]) if zh else ", ".join(f"Go to {st}" for st in steps[:3])
        elif stype == "identity":
            if zh:
                speak = "身份已验证" if s.get("verified") else "身份校验失败"
            else:
                speak = "Identity verified" if s.get("verified") else "Identity verification failed"
        elif it in {"baggage", "security_wait", "special_assistance"}:
            base = s.get("policy") or s.get("advice")
            if not base and stype == "special_assistance":
                base = "协助请求已创建" if zh else "Assistance request created"
            speak = base
        if speak:
            # Millisecond timestamp as filename; collisions are possible under
            # concurrent load — acceptable only if that risk is accepted upstream.
            out_path = f"tts_{int(time.time() * 1000)}.wav"
            r = tts.synthesize_to_wav(speak, out_path, lang=lang)
            if r.get("ok"):
                resp["audio_path"] = out_path
    return resp


@router.post("/perception/resolve")
async def perception_resolve(req: PerceptionRequest):
    """Resolve intent/service from ASR and/or OCR inputs; optionally speak the reply.

    Builds the graph payload from the supplied media: ``asr_path`` is
    transcribed into ``text`` (modality "voice") and ``ocr_path`` is scanned
    into an id_card ``scan_data`` (modality "scan"). When both are given the
    scan branch runs second, so modality ends up "scan" (last writer wins).
    Returns the resolved service, intent, confidence and full state; when
    ``req.tts`` is truthy a derived sentence is synthesized and its WAV path
    is returned under ``audio_path``.
    """
    payload: Dict[str, Any] = {}
    lang = str((req.session or {}).get("lang") or "zh").lower()
    if req.asr_path:
        r = asr.transcribe_wav(req.asr_path, lang=lang)
        payload["text"] = r.get("text", "")
        payload["modality"] = "voice"
    if req.ocr_path:
        r = ocr.scan_image(req.ocr_path, lang=lang)
        payload["scan_data"] = {"document_type": "id_card", "mrz": None, "id_no": r.get("text", "")}
        payload["modality"] = "scan"
    if req.confirm is not None:
        payload["confirm"] = req.confirm
    t0 = time.time()
    # NOTE(review): graph.invoke and the inferencer calls appear synchronous;
    # they block the event loop inside this async handler — confirm.
    state = graph.invoke(payload, config={"configurable": {"thread_id": "default"}})
    e2e_latency_ms.labels(path="/perception/resolve").observe(max(0, (time.time() - t0) * 1000))
    resp = {"service": state.get("service", {}), "intent": state.get("intent", ""), "confidence": state.get("confidence", 0), "state": state}
    if req.tts:
        # The graph may yield "service": None; normalize once instead of the
        # scattered (s or {}) guards (the original crashed on the first
        # unguarded s.get when service was None).
        s = resp["service"] or {}
        stype = s.get("type")
        it = str(state.get("intent") or stype or "")
        zh = lang == "zh"
        speak = None
        if stype == "gate_query":
            gate = s.get("gate")
            if gate:
                speak = f"您的登机口是 {gate}" if zh else f"Your gate is {gate}"
        elif stype == "faq":
            speak = s.get("answer")
        elif stype == "navigate":
            steps = s.get("steps") or []
            if steps:
                # Speak at most the first three steps.
                speak = "，".join(steps[:3]) if zh else ", ".join(f"Go to {st}" for st in steps[:3])
        elif stype == "identity":
            if zh:
                speak = "身份已验证" if s.get("verified") else "身份校验失败"
            else:
                speak = "Identity verified" if s.get("verified") else "Identity verification failed"
        elif it in {"baggage", "security_wait", "special_assistance"}:
            base = s.get("policy") or s.get("advice")
            if not base and stype == "special_assistance":
                base = "协助请求已创建" if zh else "Assistance request created"
            speak = base
        if speak:
            # Millisecond timestamp as filename; collisions are possible under
            # concurrent load — acceptable only if that risk is accepted upstream.
            out_path = f"tts_{int(time.time() * 1000)}.wav"
            r = tts.synthesize_to_wav(speak, out_path, lang=lang)
            if r.get("ok"):
                resp["audio_path"] = out_path
    return resp