import os
import sys
import argparse
from typing import List, Optional

from dotenv import load_dotenv


def _make_llm(provider_override: Optional[str] = None, model_override: Optional[str] = None, silent: bool = False):
    """
    Construct an LLM client based on whichever API key is available.

    Priority (current order):
        1. Google Gemini (GOOGLE_API_KEY)
        2. Groq (GROQ_API_KEY)
        3. OpenAI (OPENAI_API_KEY)
        4. ZhipuAI / 智谱 (ZHIPU_API_KEY)
        5. Alibaba DashScope / 通义千问 (DASHSCOPE_API_KEY)
        6. Baidu Qianfan / 文心 (QIANFAN_AK + QIANFAN_SK)
        7. Local Ollama (OLLAMA_MODEL)

    You can override the selection by setting PREFERRED_PROVIDER to one of:
        'google','groq','openai','zhipu','dashscope','qianfan','ollama'
    """
    # Lazy imports so users without certain libs don't fail immediately
    google_key = os.getenv("GOOGLE_API_KEY")
    groq_key = os.getenv("GROQ_API_KEY")
    openai_key = os.getenv("OPENAI_API_KEY")
    zhipu_key = os.getenv("ZHIPU_API_KEY")
    dashscope_key = os.getenv("DASHSCOPE_API_KEY")
    qianfan_ak = os.getenv("QIANFAN_AK")
    qianfan_sk = os.getenv("QIANFAN_SK")
    ollama_model = os.getenv("OLLAMA_MODEL")
    preferred = (provider_override or os.getenv("PREFERRED_PROVIDER") or "").lower().strip()

    def pick(order):
        """Return first provider id in order that has credentials/models."""
        for name in order:
            if name == "google" and google_key:
                return "google"
            if name == "groq" and groq_key:
                return "groq"
            if name == "openai" and openai_key:
                return "openai"
            if name == "zhipu" and zhipu_key:
                return "zhipu"
            if name == "dashscope" and dashscope_key:
                return "dashscope"
            if name == "qianfan" and qianfan_ak and qianfan_sk:
                return "qianfan"
            if name == "ollama" and ollama_model:
                return "ollama"
        return None

    if preferred:
        chosen = pick([preferred])
        if not chosen:
            if not silent:
                print(f"[warn] PREFERRED_PROVIDER='{preferred}' not usable (missing key/model or dependency), falling back.")
        else:
            provider = chosen
            # go straight to construction below
            return _build_provider_llm(provider, google_key, groq_key, openai_key, zhipu_key, dashscope_key, qianfan_ak, qianfan_sk, ollama_model, model_override=model_override, silent=silent)

    provider = pick(["google","groq","openai","zhipu","dashscope","qianfan","ollama"])
    if provider:
        return _build_provider_llm(provider, google_key, groq_key, openai_key, zhipu_key, dashscope_key, qianfan_ak, qianfan_sk, ollama_model, model_override=model_override, silent=silent)

    raise RuntimeError(
        "No API key/model found. Set one of GOOGLE_API_KEY, GROQ_API_KEY, OPENAI_API_KEY, "
        "ZHIPU_API_KEY, DASHSCOPE_API_KEY, QIANFAN_AK/QIANFAN_SK, or OLLAMA_MODEL."
    )


def _resolve_provider(provider_override: Optional[str] = None) -> Optional[str]:
    """Return the provider string that would be selected (without constructing any LLM)."""
    google_key = os.getenv("GOOGLE_API_KEY")
    groq_key = os.getenv("GROQ_API_KEY")
    openai_key = os.getenv("OPENAI_API_KEY")
    zhipu_key = os.getenv("ZHIPU_API_KEY")
    dashscope_key = os.getenv("DASHSCOPE_API_KEY")
    qianfan_ak = os.getenv("QIANFAN_AK")
    qianfan_sk = os.getenv("QIANFAN_SK")
    ollama_model = os.getenv("OLLAMA_MODEL")
    preferred = (provider_override or os.getenv("PREFERRED_PROVIDER") or "").lower().strip()

    order = ["google","groq","openai","zhipu","dashscope","qianfan","ollama"]

    def has(name: str) -> bool:
        return (
            (name == "google" and bool(google_key)) or
            (name == "groq" and bool(groq_key)) or
            (name == "openai" and bool(openai_key)) or
            (name == "zhipu" and bool(zhipu_key)) or
            (name == "dashscope" and bool(dashscope_key)) or
            (name == "qianfan" and bool(qianfan_ak and qianfan_sk)) or
            (name == "ollama" and bool(ollama_model))
        )

    if preferred and has(preferred):
        return preferred
    if preferred and not has(preferred):
        # fall back silently
        pass
    for name in order:
        if has(name):
            return name
    return None


def _build_provider_llm(provider, google_key, groq_key, openai_key, zhipu_key, dashscope_key, qianfan_ak, qianfan_sk, ollama_model, model_override: Optional[str] = None, silent: bool = False):
    """Instantiate the selected provider's chat model (minimal default models)."""
    temperature = float(os.getenv("LLM_TEMPERATURE", "0.2"))
    if provider == "google":
        from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
        model = model_override or os.getenv("GOOGLE_MODEL", "gemini-flash-lite-latest")
        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
    if provider == "groq":
        from langchain_groq import ChatGroq
        model = model_override or os.getenv("GROQ_MODEL", "llama-3.1-8b-instant")
        return ChatGroq(model=model, temperature=temperature)
    if provider == "openai":
        from langchain_openai import ChatOpenAI
        model = model_override or os.getenv("OPENAI_MODEL", "gpt-4o-mini")
        return ChatOpenAI(model=model, temperature=temperature)
    if provider == "zhipu":
        # 智谱 GLM (needs: pip install zhipuai) + LangChain community
        try:
            from langchain_community.chat_models import ChatZhipuAI  # type: ignore
        except ImportError as e:
            raise ImportError("Missing dependency for ZhipuAI. Run: pip install zhipuai") from e
        model = model_override or os.getenv("ZHIPU_MODEL", "glm-4-flash")
        return ChatZhipuAI(model=model, temperature=temperature, api_key=zhipu_key)
    if provider == "dashscope":
        # 阿里云 DashScope / 通义千问 (pip install dashscope)
        try:
            from langchain_community.chat_models import ChatDashScope  # type: ignore
        except ImportError as e:
            raise ImportError("Missing dependency for DashScope. Run: pip install dashscope") from e
        model = model_override or os.getenv("DASHSCOPE_MODEL", "qwen-plus")
        return ChatDashScope(model=model, temperature=temperature, dashscope_api_key=dashscope_key)
    if provider == "qianfan":
        # 百度千帆 (pip install qianfan) - LangChain offers QianfanChatEndpoint
        try:
            from langchain_community.chat_models import QianfanChatEndpoint  # type: ignore
        except ImportError as e:
            raise ImportError("Missing dependency for Qianfan. Run: pip install qianfan") from e
        model = model_override or os.getenv("QIANFAN_MODEL", "ERNIE-Speed-128K")
        return QianfanChatEndpoint(model=model, temperature=temperature, qianfan_ak=qianfan_ak, qianfan_sk=qianfan_sk)
    if provider == "ollama":
        # 本地 Ollama (pip install langchain-ollama ; install Ollama app separately) e.g. OLLAMA_MODEL=qwen2:7b
        from langchain_ollama import ChatOllama
        model = model_override or ollama_model
        return ChatOllama(model=model, temperature=temperature)
    raise ValueError(f"Unsupported provider: {provider}")


def detect_available_providers() -> List[str]:
    """Return provider ids that have both minimum credentials set and, for the
    community/local providers, an importable SDK."""
    # (provider id, credential predicate, extra module that must import or None).
    # google/groq/openai are checked by credentials only, as before.
    checks = (
        ("google", lambda: os.getenv("GOOGLE_API_KEY"), None),
        ("groq", lambda: os.getenv("GROQ_API_KEY"), None),
        ("openai", lambda: os.getenv("OPENAI_API_KEY"), None),
        ("zhipu", lambda: os.getenv("ZHIPU_API_KEY"), "zhipuai"),
        ("dashscope", lambda: os.getenv("DASHSCOPE_API_KEY"), "dashscope"),
        ("qianfan", lambda: os.getenv("QIANFAN_AK") and os.getenv("QIANFAN_SK"), "qianfan"),
        ("ollama", lambda: os.getenv("OLLAMA_MODEL"), "langchain_ollama"),
    )
    found: List[str] = []
    for name, credentials_ok, dependency in checks:
        try:
            if not credentials_ok():
                continue
            if dependency is not None:
                # Light importability check for the provider's SDK.
                __import__(dependency)
            found.append(name)
        except Exception:
            # Best-effort probe: providers with missing deps are skipped silently.
            continue
    return found


def build_arg_parser() -> argparse.ArgumentParser:
    """Create the command-line parser for the multi-provider demo."""
    parser = argparse.ArgumentParser(description="Windows-Use multi-provider demo")
    # Free-form task words; joined into a single task string by main().
    parser.add_argument(
        "task",
        nargs="*",
        help="Task for the agent, e.g. Open Notepad and type Hello",
    )
    parser.add_argument(
        "--provider",
        dest="provider",
        help="Force provider (google|groq|openai|zhipu|dashscope|qianfan|ollama)",
    )
    parser.add_argument(
        "--model",
        dest="model",
        help="Override model name for selected provider",
    )
    parser.add_argument(
        "--list",
        action="store_true",
        help="List detected available providers and exit",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Run environment/provider self-check and exit",
    )
    # The BROWSER env var seeds the default; 'edge' otherwise.
    parser.add_argument(
        "--browser",
        choices=["edge", "chrome", "firefox"],
        default=os.getenv("BROWSER", "edge"),
        help="Browser for context (default: edge)",
    )
    parser.add_argument(
        "--vision",
        action="store_true",
        help="Enable vision mode if provider supports it (experimental)",
    )
    parser.add_argument(
        "--no-minimize",
        action="store_true",
        help="Do not auto-minimize windows (default already False)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Only resolve provider and model; do not launch agent",
    )
    return parser


def self_check(verbose: bool = True) -> int:
    """Report provider availability; return 0 if any provider is usable, else 1."""
    providers = detect_available_providers()
    if verbose:
        label = ", ".join(providers) if providers else "None"
        print("[Self-Check] Available providers:", label)
        if not providers:
            # One actionable hint when nothing at all is configured.
            print(" -", "No providers detected. Set at least one API key or OLLAMA_MODEL.")
    return 0 if providers else 1


def main(argv: Optional[List[str]] = None):
    """CLI entry point: resolve a provider, build the LLM, and run the agent.

    Returns a process exit code (0 on success, 1 from a failed self-check).
    ``--list`` and ``--check`` short-circuit before any LLM construction.
    """
    load_dotenv()
    parser = build_arg_parser()
    args = parser.parse_args(argv)

    if args.list:
        providers = detect_available_providers()
        if providers:
            print("Detected providers:", ", ".join(providers))
        else:
            print("No providers detected.")
        return 0

    if args.check:
        return self_check(verbose=True)

    task = " ".join(args.task) if args.task else None
    # BUGFIX: --dry-run only resolves the provider/model and never uses the
    # task, so don't block on interactive input() for it.
    if not task and not args.dry_run:
        print("Tip: Pass a task, e.g.: python demo.py Open Notepad and type Hello")
        task = input("Enter your task: ").strip()

    llm = _make_llm(provider_override=args.provider, model_override=args.model)

    if args.dry_run:
        print("[Dry-Run] Provider resolved. Not launching agent.")
        return 0

    # Imported lazily to keep --list/--check/--dry-run fast and GUI-free.
    from windows_use.agent import Agent, Browser

    browser_map = {
        "edge": Browser.EDGE,
        "chrome": Browser.CHROME,
        "firefox": Browser.FIREFOX,
    }
    # Defensive default: fall back to Edge if the value somehow isn't mapped.
    browser_choice = browser_map.get(args.browser, Browser.EDGE)

    agent = Agent(
        llm=llm,
        browser=browser_choice,
        use_vision=bool(args.vision),
        auto_minimize=not args.no_minimize,  # --no-minimize disables auto-minimize
    )

    agent.print_response(query=task)
    return 0


if __name__ == "__main__":
    # Basic guard for Windows platform; the package targets Windows 7–11.
    # Note: on non-Windows this only warns — main() still runs, so
    # --list/--check/--dry-run remain usable for development elsewhere.
    if os.name != "nt":
        print("This demo is intended to run on Windows.")
    sys.exit(main())
