import os
import json
import urllib.request
from typing import List


class LLMClientError(RuntimeError):
    """Error raised when no LLM provider is available, or when a call to a
    specific provider fails."""


def _http_post(url: str, payload: dict, headers: dict) -> dict:
    """POST *payload* as JSON to *url* with *headers*; return the parsed JSON reply.

    Uses a 60-second timeout. Undecodable bytes in the response are dropped
    (best-effort decode) before JSON parsing.
    """
    body = json.dumps(payload).encode("utf-8")
    request = urllib.request.Request(url, data=body, headers=headers, method="POST")
    with urllib.request.urlopen(request, timeout=60) as response:  # nosec B310
        raw = response.read()
    return json.loads(raw.decode("utf-8", errors="ignore"))


# Configuration (kept simple and environment-driven)
# Ollama: base URL of an OpenAI-compatible endpoint and the model to request.
OLLAMA_BASE = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434/v1")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3.1:latest")
# OpenAI: API key (None when the env var is unset) and model name.
OPENAI_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_MODEL = os.getenv("AUTOGEN_MODEL", "gpt-4o-mini")


def llm_chat(messages: List[dict], provider: str | None = None, override_openai_key: str | None = None) -> str:
    """Call an available LLM provider and return the assistant's reply text.

    Args:
        messages: OpenAI-style chat messages (dicts with "role"/"content").
        provider: optional override, 'ollama' or 'openai'. If None, Ollama is
            tried first and OpenAI is used as a fallback.
        override_openai_key: if provided, use this OpenAI key instead of the
            OPENAI_API_KEY environment variable.

    Returns:
        The assistant message content from the first successful provider.

    Raises:
        LLMClientError: when the forced provider fails, or when no provider
            succeeds in fallback mode.
    """
    # Sampling temperature is environment-driven so deployments can tune it
    # without code changes; defaults to "0" (deterministic output).
    temperature = float(os.getenv("LLM_TEMPERATURE", "0"))

    def try_ollama() -> str:
        """POST to the Ollama OpenAI-compatible endpoint; return assistant text."""
        if not OLLAMA_BASE:
            raise LLMClientError("Ollama base URL not configured")
        url = f"{OLLAMA_BASE.rstrip('/')}/chat/completions"
        payload = {"model": OLLAMA_MODEL, "messages": messages, "temperature": temperature}
        # Ollama itself ignores the bearer token, but proxies in front of it may
        # require one; 'ollama' is the conventional placeholder.
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {os.getenv('OLLAMA_API_KEY', 'ollama')}",
        }
        res = _http_post(url, payload, headers)
        return res["choices"][0]["message"]["content"]

    def try_openai(key: str) -> str:
        """POST to the OpenAI chat completions API; return assistant text."""
        if not key:
            # Explicitly indicate an invalid/missing key for callers to handle.
            raise LLMClientError("api-key无效")
        url = "https://api.openai.com/v1/chat/completions"
        payload = {"model": OPENAI_MODEL, "messages": messages, "temperature": temperature}
        try:
            res = _http_post(url, payload, {"Content-Type": "application/json", "Authorization": f"Bearer {key}"})
            return res["choices"][0]["message"]["content"]
        except Exception as e:
            # If the OpenAI call fails (auth or other), surface a clear
            # invalid-key error; do not silently fall back to Ollama when the
            # caller explicitly requested OpenAI. Chain the cause so the real
            # failure is preserved in the traceback.
            raise LLMClientError("api-key无效") from e

    # Caller forced a specific provider: use it, never fall back.
    if provider == "ollama":
        try:
            return try_ollama()
        except LLMClientError:
            raise  # already our error type; keep message and cause intact
        except Exception as e:
            raise LLMClientError(str(e)) from e
    if provider == "openai":
        key = override_openai_key or OPENAI_KEY
        try:
            return try_openai(key)
        except LLMClientError:
            raise
        except Exception as e:
            raise LLMClientError(str(e)) from e

    # Default behavior: try Ollama first, then fall back to OpenAI.
    try:
        return try_ollama()
    except Exception:
        key = override_openai_key or OPENAI_KEY
        try:
            return try_openai(key)
        except LLMClientError:
            raise
        except Exception as e:
            raise LLMClientError(str(e)) from e
