# SPDX-License-Identifier: Apache-2.0
"""
alpha_factory_v1.backend.orchestrator
=====================================

Alpha-Factory v1 👁️✨ — Control-Tower v3.0.0  (2025-05-02)
────────────────────────────────────────────────────────────
▸ Auto-discovers & supervises every agent (pkg + plugin entry-points)  
▸ Dual interface → FastAPI (REST/OpenAPI)  + gRPC (A2A-0.5) – served in-parallel  
▸ Kafka event/experience bus **or** seamless in-proc fallback (air-gapped dev)  
▸ Memory-Fabric bridge (vector + graph) exposed to agents & REST  
▸ Prometheus /metrics, OpenTelemetry tracing, JSON logs, health-probes  
▸ OpenAI Agents SDK + Google ADK soft-bridges (auto-activate when installed)  
▸ Graceful-degradation: every heavy optional dep is a soft-import;  
  the orchestrator never crashes because a library or external service is missing.  
▸ Dev/Edge mode (`--dev` flag *or* `DEV_MODE=true`) → in-memory stubs, no Kafka,
  no databases — demo runs on a Raspberry Pi zero-trust air-gap.

Run examples
────────────
    # local dev – all bells & whistles
    python -m alpha_factory_v1.backend.orchestrator

    # edge / air-gapped
    DEV_MODE=true ALPHA_ENABLED_AGENTS="Manufacturing,Energy" \
        python -m alpha_factory_v1.backend.orchestrator --dev

    # container
    docker compose -f demos/docker-compose.cross_industry.yml up
"""
from __future__ import annotations

# ────────────────────────────── std-lib ───────────────────────────────
import asyncio
import contextlib
import importlib
import json
import logging
import os
import signal
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

# ────────────────────────── soft-imports (all optional) ───────────────
with contextlib.suppress(ModuleNotFoundError):
    from fastapi import FastAPI, HTTPException
    from fastapi.responses import PlainTextResponse
    import uvicorn

with contextlib.suppress(ModuleNotFoundError):
    import grpc
    from concurrent import futures  # noqa: F401 – imported for typing only

with contextlib.suppress(ModuleNotFoundError):
    from kafka import KafkaProducer

with contextlib.suppress(ModuleNotFoundError):
    from prometheus_client import (
        Counter,
        Gauge,
        Histogram,
        CONTENT_TYPE_LATEST,
        generate_latest,
        start_http_server,
    )

with contextlib.suppress(ModuleNotFoundError):
    from opentelemetry import trace

with contextlib.suppress(ModuleNotFoundError):
    from openai.agents import AgentRuntime, AgentContext  # type: ignore[attr-defined]

with contextlib.suppress(ModuleNotFoundError):
    import adk  # Google Agent Development Kit  # type: ignore


# ───────────────────── mandatory local imports ────────────────────────
from backend.agents import list_agents, get_agent  # auto-disc helpers

# Memory fabric is optional → graceful stub when absent
try:
    from backend.memory_fabric import mem  # type: ignore
except ModuleNotFoundError:  # pragma: no cover
    # Stand-ins that mirror the `mem.vector` / `mem.graph` surface used by
    # this module (add/search/recent/query) while returning empty results,
    # so the orchestrator keeps running without the real memory fabric.
    class _VecDummy:  # pylint: disable=too-few-public-methods
        def add(self, *_a, **_kw): ...
        def search(self, *_a, **_kw): return []
        def recent(self, *_a, **_kw): return []

    class _GraphDummy:  # pylint: disable=too-few-public-methods
        def add(self, *_a, **_kw): ...
        def query(self, *_a, **_kw): return []

    class _MemStub:  # pylint: disable=too-few-public-methods
        # Same attribute names as the real fabric: `vector` and `graph`.
        vector = _VecDummy()
        graph = _GraphDummy()
    mem = _MemStub()  # type: ignore


# ────────────────────────── configuration ─────────────────────────────
ENV = os.getenv  # shorthand used throughout this module
# Dev/Edge mode: enabled via env var *or* the --dev CLI flag (see docstring).
DEV_MODE = ENV("DEV_MODE", "false").lower() == "true" or "--dev" in sys.argv
LOGLEVEL = ENV("LOGLEVEL", "INFO").upper()
PORT = int(ENV("PORT", "8000"))              # REST API port
METRICS_PORT = int(ENV("METRICS_PORT", "0"))  # 0 → standalone metrics server off
A2A_PORT = int(ENV("A2A_PORT", "0"))          # 0 → gRPC A2A server off
SSL_DISABLE = ENV("INSECURE_DISABLE_TLS", "false").lower() == "true"
# Kafka is never used in dev mode → forces the in-proc bus fallback below.
KAFKA_BROKER = None if DEV_MODE else ENV("ALPHA_KAFKA_BROKER")
CYCLE_DEFAULT = int(ENV("ALPHA_CYCLE_SECONDS", "60"))  # default agent period (s)
MAX_CYCLE_SEC = int(ENV("MAX_CYCLE_SEC", "30"))        # per-cycle time budget (s)
# Comma-separated allow-list; empty set means "all discovered agents".
ENABLED = {s.strip() for s in ENV("ALPHA_ENABLED_AGENTS", "").split(",") if s.strip()}

logging.basicConfig(
    level=LOGLEVEL,
    format="%(asctime)s.%(msecs)03d %(levelname)-8s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    force=True,
)
log = logging.getLogger("alpha_factory.orchestrator")

# OTEL tracer — noop if lib missing
tracer = trace.get_tracer(__name__) if "trace" in globals() else None  # type: ignore

# ─────────────────── Prometheus metrics (safe-noop) ───────────────────
def _noop(*_a, **_kw):  # type: ignore
    class _N:  # pylint: disable=too-few-public-methods
        def labels(self, *_a, **_kw): return self
        def observe(self, *_a): ...
        def inc(self, *_a): ...
        def set(self, *_a): ...
    return _N()

# Each metric degrades to the no-op stub when prometheus_client is absent,
# so call sites never need an availability guard.
MET_LAT = (
    Histogram("af_agent_cycle_latency_ms", "Per-cycle latency", ["agent"])
    if "Histogram" in globals()
    else _noop()
)
MET_ERR = (
    Counter("af_agent_cycle_errors_total", "Exceptions per agent", ["agent"])
    if "Counter" in globals()
    else _noop()
)
MET_UP = (
    Gauge("af_agent_up", "1 = agent alive according to HB", ["agent"])
    if "Gauge" in globals()
    else _noop()
)

# Stand-alone Prometheus HTTP endpoint (separate from the REST /metrics
# route); only started when METRICS_PORT is non-zero and the lib is present.
if METRICS_PORT and "start_http_server" in globals():
    start_http_server(METRICS_PORT)
    log.info("Prometheus metrics exposed at :%d/metrics", METRICS_PORT)

# ─────────────────── Kafka producer ▸ fallback bus ────────────────────
# Use Kafka when a broker is configured AND kafka-python imported cleanly;
# otherwise fall back to per-topic in-process asyncio queues.
if KAFKA_BROKER and "KafkaProducer" in globals():
    _producer = KafkaProducer(
        bootstrap_servers=KAFKA_BROKER.split(","),
        value_serializer=lambda v: json.dumps(v).encode(),
        linger_ms=50,  # small batching window to reduce request count
    )

    def publish(topic: str, msg: Dict[str, Any]) -> None:
        # Fire-and-forget: send() returns a future that is not awaited here.
        _producer.send(topic, msg)
else:  # in-memory async queue bus
    _queues: Dict[str, asyncio.Queue] = {}
    # Broker was configured but the kafka lib is unavailable → warn operator.
    if KAFKA_BROKER and not DEV_MODE:
        log.warning("Kafka unavailable → falling back to in-proc bus")
    def publish(topic: str, msg: Dict[str, Any]) -> None:  # type: ignore
        _queues.setdefault(topic, asyncio.Queue()).put_nowait(msg)

# ───────────────────── helper utilities ───────────────────────────────
def utc_now() -> str:
    """Return the current UTC time as an ISO-8601 string (millisecond precision)."""
    stamp = datetime.now(timezone.utc)
    return stamp.isoformat(timespec="milliseconds")

async def maybe_await(fn, *a, **kw):  # type: ignore
    """Await *fn* directly when it is a coroutine function; otherwise run
    the blocking callable on a worker thread via ``asyncio.to_thread``."""
    if asyncio.iscoroutinefunction(fn):
        return await fn(*a, **kw)
    return await asyncio.to_thread(fn, *a, **kw)

# ─────────────── OpenAI Agents & Google ADK bridges ───────────────────
class _OAI:
    _runtime: Optional[AgentRuntime] = None
    @classmethod
    def runtime(cls) -> Optional[AgentRuntime]:
        if cls._runtime is None and "AgentRuntime" in globals():
            cls._runtime = AgentRuntime()
            log.info("OpenAI Agents SDK detected → runtime initialised")
        return cls._runtime

async def _adk_register() -> None:
    """Register this orchestrator node on the Google ADK mesh.

    No-op in dev mode or when the ``adk`` package is not installed.
    """
    if DEV_MODE or "adk" not in globals():
        return
    mesh = adk.Client()
    await mesh.register(node_type="orchestrator", metadata={"version": "v3.0.0"})
    log.info("Registered with Google ADK mesh  (node-id %s)", mesh.node_id)

# ──────────────────────── Agent Runner ────────────────────────────────
class AgentRunner:
    """Wrap one agent instance, schedule & supervise its ``run_cycle()``.

    Scheduling uses either a cron spec (agent attribute ``SCHED_SPEC``,
    parsed by croniter when installed) or a fixed period (``CYCLE_SECONDS``,
    falling back to ``CYCLE_DEFAULT``).  Each due cycle runs as a background
    task with a hard ``MAX_CYCLE_SEC`` timeout; failures are counted and
    logged but never propagate to the scheduler loop.
    """

    def __init__(self, name: str):
        self.name = name
        self.inst = get_agent(name)
        # Per-agent overrides with global defaults.
        self.period = getattr(self.inst, "CYCLE_SECONDS", CYCLE_DEFAULT)
        self.spec = getattr(self.inst, "SCHED_SPEC", None)
        self.next_ts = 0.0              # epoch seconds of the next due run
        self.last_beat = time.time()    # heartbeat consumed by _hb_watch
        self.task: Optional[asyncio.Task] = None
        self._calc_next()

        # Auto-register OpenAI Agents tools
        if "AgentContext" in globals() and isinstance(self.inst, AgentContext):
            _OAI.runtime().register(self.inst)  # type: ignore[arg-type]

    # ────────────────────────────────────────────────────────────────
    def _calc_next(self) -> None:
        """Compute next execution timestamp (croniter if available)."""
        now = time.time()
        if self.spec:
            with contextlib.suppress(ModuleNotFoundError, ValueError):
                from croniter import croniter  # type: ignore
                self.next_ts = croniter(self.spec, datetime.fromtimestamp(now)).get_next(float)
                return
        # No spec, croniter missing, or spec invalid → fixed-period fallback.
        self.next_ts = now + self.period

    # ────────────────────────────────────────────────────────────────
    async def maybe_step(self) -> None:
        """Execute .run_cycle() when due; never raise outwards."""
        if time.time() < self.next_ts:
            return
        # FIX: do not launch a new cycle while the previous one is still
        # running.  The old code unconditionally overwrote ``self.task``,
        # which both overlapped work and dropped the only reference to the
        # in-flight task (asyncio may garbage-collect unreferenced tasks).
        if self.task is not None and not self.task.done():
            return
        self._calc_next()

        async def _cycle() -> None:
            t0 = time.time()
            span_cm = tracer.start_as_current_span(self.name) if tracer else contextlib.nullcontext()
            with span_cm:
                try:
                    await asyncio.wait_for(maybe_await(self.inst.run_cycle), timeout=MAX_CYCLE_SEC)
                except asyncio.TimeoutError:
                    MET_ERR.labels(self.name).inc()
                    log.error("%s run_cycle exceeded %ss budget – skipped", self.name, MAX_CYCLE_SEC)
                except Exception as exc:  # noqa: BLE001
                    MET_ERR.labels(self.name).inc()
                    log.exception("%s.run_cycle crashed: %s", self.name, exc)
                finally:
                    # Always record latency, refresh the heartbeat and publish
                    # the cycle event — even on timeout/crash.
                    dur_ms = (time.time() - t0) * 1_000
                    MET_LAT.labels(self.name).observe(dur_ms)
                    self.last_beat = time.time()
                    publish("agent.cycle", {"agent": self.name, "latency_ms": dur_ms, "ts": utc_now()})

        self.task = asyncio.create_task(_cycle())

# ─────────────────────────── REST API ─────────────────────────────────
def _build_rest(runners: Dict[str, AgentRunner]) -> Optional[FastAPI]:
    if "FastAPI" not in globals():
        return None

    app = FastAPI(
        title="Alpha-Factory Orchestrator",
        version="3.0.0",
        docs_url="/docs",
        redoc_url=None,
    )

    @app.get("/healthz", response_class=PlainTextResponse)
    async def _health() -> str:  # noqa: D401
        return "ok"

    @app.get("/agents")
    async def _agents() -> List[str]:  # noqa: D401
        return list(runners)

    @app.post("/agent/{name}/trigger")
    async def _trigger(name: str):  # noqa: D401, ANN001
        if name not in runners:
            raise HTTPException(404, "Agent not found")
        runners[name].next_ts = 0  # run ASAP
        return {"queued": True}

    # ─── Memory-Fabric helper endpoints ──────────────────────────────
    @app.get("/memory/{agent}/recent")
    async def _recent(agent: str, n: int = 25):  # noqa: D401
        return mem.vector.recent(agent, n)

    @app.get("/memory/search")
    async def _search(q: str, k: int = 5):  # noqa: D401
        return mem.vector.search(q, k)

    @app.get("/metrics", response_class=PlainTextResponse)
    async def _metrics():  # noqa: D401
        if "generate_latest" not in globals():
            raise HTTPException(503, "prometheus_client not installed")
        return PlainTextResponse(generate_latest(), media_type=CONTENT_TYPE_LATEST)

    return app

# ─────────────────────────── gRPC A2A ────────────────────────────────
async def _serve_grpc(runners: Dict[str, AgentRunner]) -> None:
    """Start the gRPC A2A peer service on ``A2A_PORT``.

    Silently returns when A2A_PORT is unset, grpc is not installed, or the
    generated protobuf stubs are missing.  On success the server is started
    and left running on the event loop; this coroutine returns right after
    start-up (it does not block until termination).
    """
    if not A2A_PORT or "grpc" not in globals():
        return
    try:
        from backend.proto import a2a_pb2, a2a_pb2_grpc  # generated stubs  # type: ignore
    except ModuleNotFoundError:
        log.warning("A2A_PORT set but proto stubs missing – gRPC disabled")
        return

    class Peer(a2a_pb2_grpc.PeerServiceServicer):  # type: ignore
        # Bidirectional stream: each request carries either a "trigger"
        # payload (run the named agent ASAP) or a "status" payload
        # (report per-agent next-run timestamps).
        async def Stream(self, req_iter, ctx):  # noqa: N802
            async for req in req_iter:
                kind = req.WhichOneof("payload")
                if kind == "trigger" and req.trigger.name in runners:
                    # Mark as due immediately; the scheduler picks it up.
                    runners[req.trigger.name].next_ts = 0
                    yield a2a_pb2.StreamReply(ack=a2a_pb2.Ack(id=req.id))
                elif kind == "status":
                    stats = [
                        a2a_pb2.AgentStat(name=n, next_run=int(r.next_ts))
                        for n, r in runners.items()
                    ]
                    yield a2a_pb2.StreamReply(status_reply=a2a_pb2.StatusReply(stats=stats))

    # Enable TLS when certs exist (unless explicitly disabled).  Note the
    # pair order required by grpc.ssl_server_credentials:
    # (private_key, certificate_chain).
    creds = None
    if not SSL_DISABLE:
        cert_dir = Path(ENV("TLS_CERT_DIR", "/certs"))
        crt, key = cert_dir / "server.crt", cert_dir / "server.key"
        if crt.exists() and key.exists():
            creds = grpc.ssl_server_credentials(((key.read_bytes(), crt.read_bytes()),))

    server = grpc.aio.server()
    a2a_pb2_grpc.add_PeerServiceServicer_to_server(Peer(), server)
    bind = f"[::]:{A2A_PORT}"
    server.add_secure_port(bind, creds) if creds else server.add_insecure_port(bind)
    await server.start()
    log.info("gRPC A2A server listening on %s (%s)", bind, "TLS" if creds else "plaintext")

# ─────────────────────── Heartbeat monitor ───────────────────────────
async def _hb_watch(runners: Dict[str, AgentRunner]) -> None:
    """Endless monitor: refresh the per-agent liveness gauge every 5 s.

    An agent counts as alive while its last heartbeat is younger than
    three times its scheduling period.
    """
    while True:
        now = time.time()
        for agent_name, runner in runners.items():
            is_alive = now - runner.last_beat < runner.period * 3.0
            MET_UP.labels(agent_name).set(int(is_alive))
        await asyncio.sleep(5)

# ───────────────────────────── main() ────────────────────────────────
async def _main() -> None:
    """Orchestrator entry coroutine.

    Boot order: discover & wrap agents → start REST API → install signal
    handlers → launch heartbeat monitor and auxiliary bridges → run the
    scheduling loop until SIGINT/SIGTERM → drain in-flight cycles.
    """
    # ─── Discover/instantiate agents ─────────────────────────────────
    avail = list_agents()
    names = [n for n in avail if not ENABLED or n in ENABLED]
    if not names:
        log.error("No agents selected – ENABLED=%s   ABORT", ENABLED or "ALL")
        sys.exit(1)
    runners = {n: AgentRunner(n) for n in names}
    log.info("Bootstrapped %d agent(s): %s", len(runners), ", ".join(runners))

    # ─── REST API server (async, non-blocking) ───────────────────────
    api = _build_rest(runners)
    if api and "uvicorn" in globals():
        cfg = uvicorn.Config(api, host="0.0.0.0", port=PORT, log_level=LOGLEVEL.lower())
        # Keep a reference so the server task is not garbage-collected.
        rest_task = asyncio.create_task(uvicorn.Server(cfg).serve())  # noqa: F841
        log.info("REST UI →  http://localhost:%d/docs", PORT)

    # ─── Graceful shutdown handling (must precede the main loop) ─────
    stop_ev = asyncio.Event()
    for sig in (signal.SIGINT, signal.SIGTERM):
        with contextlib.suppress(RuntimeError):
            asyncio.get_running_loop().add_signal_handler(sig, stop_ev.set)

    # ─── Kick off auxiliary subsystems ───────────────────────────────
    # FIX: _hb_watch() loops forever — gathering it inline blocked here,
    # so the scheduling loop below never ran.  Run it as a background
    # task and await only the finite start-up coroutines.
    hb_task = asyncio.create_task(_hb_watch(runners))
    await asyncio.gather(_serve_grpc(runners), _adk_register())

    # ─── Core scheduling loop ────────────────────────────────────────
    while not stop_ev.is_set():
        await asyncio.gather(*(r.maybe_step() for r in runners.values()))
        await asyncio.sleep(0.25)

    # ─── Drain & exit cleanly ────────────────────────────────────────
    hb_task.cancel()
    await asyncio.gather(*(r.task for r in runners.values() if r.task), return_exceptions=True)
    log.info("Orchestrator shutdown complete")

# ───────────────────────── CLI entry-point ──────────────────────────
if __name__ == "__main__":
    # Ctrl-C is the expected way to stop a foreground run – exit quietly.
    with contextlib.suppress(KeyboardInterrupt):
        asyncio.run(_main())
