from typing import Dict, Any, Optional
import time
import httpx
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
from intelli_port.commons.observability import provider_requests_total, provider_request_latency_ms, provider_errors_total, provider_queue_length, provider_kv_cache_hit_rate, provider_gpu_mem_used_mb
from intelli_port.commons.context import corr_id_var
from intelli_port.commons.config import load_settings


# Process-wide HTTP client; created lazily by _get_client() on first use.
_client: Optional[httpx.Client] = None
# Settings are loaded once at import time; the tunables below fall back to
# their defaults when the attribute is missing or falsy.
_settings = load_settings()
# Maximum attempts for transient-failure retries (tenacity stop condition).
_RETRY_ATTEMPTS = int(getattr(_settings, "http_retry_attempts", 3) or 3)
# Upper bound, in seconds, on the exponential backoff between retry attempts.
_RETRY_MAX = float(getattr(_settings, "http_retry_backoff_max", 2.0) or 2.0)


def _get_client() -> httpx.Client:
    """Return the shared process-wide httpx.Client, creating it on first use."""
    global _client
    if _client is not None:
        return _client
    # Connection pool limits come from settings, with sane fallbacks.
    pool_limits = httpx.Limits(
        max_keepalive_connections=20,
        max_connections=int(getattr(_settings, "http_client_max_connections", 100)),
    )
    _client = httpx.Client(
        timeout=float(getattr(_settings, "http_client_timeout", 5.0)),
        limits=pool_limits,
        headers={"connection": "keep-alive"},
    )
    return _client


class HybridFlowQwenProvider:
    """Chat-completion provider backed by a HybridFlow-served Qwen endpoint.

    Always returns a well-formed response dict: when no endpoint is
    configured, or when the remote call ultimately fails, the input text is
    echoed back as the reply (best-effort fallback).
    """

    def __init__(self):
        # Empty endpoint => provider runs in echo/stub mode (see infer()).
        self.endpoint = getattr(_settings, "qwen_hybridflow_endpoint", "")

    # BUGFIX: previously @retry sat on infer(), whose body caught *every*
    # exception itself, so ConnectError/ReadTimeout never reached tenacity
    # and the retry policy was dead code.  The raw HTTP call now lives in
    # its own retried helper; transient errors propagate out of it so
    # tenacity can retry, and infer() handles the final failure.
    @retry(
        reraise=True,  # after the last attempt, re-raise the original error
        stop=stop_after_attempt(_RETRY_ATTEMPTS),
        wait=wait_exponential(multiplier=0.2, min=0.2, max=_RETRY_MAX),
        retry=retry_if_exception_type((httpx.ConnectError, httpx.ReadTimeout)),
    )
    def _post(self, payload: Dict[str, Any], headers: Optional[Dict[str, str]]) -> httpx.Response:
        """POST the chat payload to the endpoint, retrying transient failures."""
        return _get_client().post(self.endpoint, json=payload, headers=headers)

    def _record_backend_gauges(self, r: httpx.Response) -> None:
        """Best-effort export of backend health headers as gauges.

        Malformed header values must never fail the request path, so only
        the conversion errors float() can raise are swallowed.
        """
        try:
            qlen = float(r.headers.get("x-queue-length", "0") or 0.0)
            hit = float(r.headers.get("x-kv-cache-hit-rate", "0") or 0.0)
            gpum = float(r.headers.get("x-gpu-mem-used-mb", "0") or 0.0)
            provider_queue_length.labels(name="qwen_hybridflow").set(qlen)
            provider_kv_cache_hit_rate.labels(name="qwen_hybridflow").set(hit)
            provider_gpu_mem_used_mb.labels(name="qwen_hybridflow").set(gpum)
        except (ValueError, TypeError):
            pass

    def infer(self, text: str, lang: str = "zh") -> Dict[str, Any]:
        """Run one chat completion for *text*.

        Parameters:
            text: user message content (also the fallback reply on failure).
            lang: language tag echoed back in the response dict.

        Returns:
            Dict with keys "reply", "lang", "provider", "latency_ms".
            latency_ms is now the measured wall-clock time of the request
            (the original code returned a hard-coded placeholder).
        """
        if not self.endpoint:
            # Stub mode: echo the input so the surrounding pipeline keeps working.
            return {"reply": text, "lang": lang, "provider": "hybridflow", "latency_ms": 1}
        payload = {"model": "qwen", "messages": [{"role": "user", "content": text}]}
        t0 = time.time()
        try:
            corr = corr_id_var.get()
            headers = {"x-correlation-id": corr} if corr else None
            r = self._post(payload, headers)
            elapsed_ms = max(0, (time.time() - t0) * 1000)
            provider_request_latency_ms.labels(name="qwen_hybridflow").observe(elapsed_ms)
            provider_requests_total.labels(name="qwen_hybridflow", status=str(r.status_code)).inc()
            self._record_backend_gauges(r)
            if r.status_code != 200:
                return {"reply": text, "lang": lang, "provider": "hybridflow", "latency_ms": int(elapsed_ms)}
            data = r.json()
            c = data.get("choices", [{}])[0].get("message", {}).get("content", "")
            return {"reply": c or text, "lang": lang, "provider": "hybridflow", "latency_ms": int(elapsed_ms)}
        except Exception as e:
            # Reached when retries are exhausted (reraise=True re-raises the
            # original ConnectError/ReadTimeout), or on JSON/transport errors.
            provider_errors_total.labels(name="qwen_hybridflow", type=e.__class__.__name__).inc()
            return {
                "reply": text,
                "lang": lang,
                "provider": "hybridflow",
                "latency_ms": int(max(0, (time.time() - t0) * 1000)),
            }