# aegaeon_proto/worker.py
"""
Distributed worker that:
- consumes per-node queue (and global fallback) using BRPOPLPUSH semantics,
- groups prefill requests per model locally and runs prefill pipeline to fill KV,
- writes model->node mapping to Redis when prefetch completes (model locality),
- publishes results into RESULT_HASH and publishes to RESULT_PUB_CHANNEL for WS & notifications.
"""

import os
import time
import json
import traceback
import asyncio
import logging
import redis
from typing import Dict, List

from instance import GPUInstance  # uses your existing GPUInstance implementation

# config
# Connection string for the shared Redis instance backing all queues/state.
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
# Unique node identity; defaults to the kernel-reported host name.
NODE_ID = os.environ.get("NODE_ID", os.uname().nodename)
# Comma-separated GPU ids managed by this worker, e.g. "0,2,3".
LOCAL_GPUS = os.environ.get("LOCAL_GPUS", "0")
LOCAL_GPUS = [int(x.strip()) for x in LOCAL_GPUS.split(",") if x.strip()]  # parsed to list[int]
# Redis key layout shared with the scheduler/frontend.
NODE_QUEUE = f"aegaeon:req:node:{NODE_ID}"  # per-node request queue (drained first)
GLOBAL_QUEUE = "aegaeon:requests"  # fallback queue any node may drain
PROCESSING_LIST = "aegaeon:processing"  # temporary holding list (BRPOPLPUSH destination)
RESULT_HASH = "aegaeon:results"  # hash: req_id -> result JSON
RESULT_PUB_CHANNEL = "aegaeon:results:channel"  # pub/sub channel for WS forwarding
MODEL_LOC_PREFIX = "aegaeon:model_locations:"  # prefix + model -> set of node ids hosting it
NODE_REGISTRY = "aegaeon:nodes"  # hash: node id -> node info JSON

# decode_responses=True: all Redis values come back as str, not bytes
r = redis.Redis.from_url(REDIS_URL, decode_responses=True)
log = logging.getLogger("aegaeon.worker")
logging.basicConfig(level=logging.INFO)

# create local GPU instances
instances = [GPUInstance(gpu_id=g, role="worker") for g in LOCAL_GPUS]
# gpu_id -> number of requests currently assigned (crude least-loaded metric)
instance_load = {inst.gpu_id: 0 for inst in instances}

# Local request grouping per model (prefill groups)
local_groups: Dict[str, List[dict]] = {}  # model -> list of metas

# Parameters
GROUP_SIZE = int(os.environ.get("GROUP_SIZE", "8"))  # flush a group once it reaches this size
PREFILL_TIMEOUT = float(os.environ.get("PREFILL_TIMEOUT", "0.5"))  # seconds to wait before forcing group processing

def pick_instance_for_job():
    """Return the least-loaded local GPUInstance, or None when this node has no GPUs."""
    if not instances:
        return None

    def current_load(inst):
        # Unknown gpu_ids count as idle.
        return instance_load.get(inst.gpu_id, 0)

    return min(instances, key=current_load)

def register_node_info():
    """Advertise this node (id, GPU list, heartbeat timestamp) in the shared registry hash."""
    payload = json.dumps({
        "node": NODE_ID,
        "gpus": LOCAL_GPUS,
        "ts": time.time(),
    })
    r.hset(NODE_REGISTRY, NODE_ID, payload)

def write_result_and_publish(req_id, obj):
    """Persist *obj* under *req_id* in the results hash, then broadcast it on the pub/sub channel.

    The publish is best-effort: failures are logged but never raised, so a
    broken subscriber path cannot lose the durable hash write.
    """
    serialized = json.dumps(obj)
    r.hset(RESULT_HASH, req_id, serialized)
    # pub-sub for WS forwarding
    try:
        r.publish(RESULT_PUB_CHANNEL, serialized)
    except Exception:
        log.exception("publish failed")

def mark_model_location(model: str):
    """Record that *model* now resides on this node (SADD is idempotent, so re-marking is safe)."""
    r.sadd(f"{MODEL_LOC_PREFIX}{model}", NODE_ID)

# Strong references to in-flight callback tasks: asyncio only keeps weak refs
# to tasks, so without this a fire-and-forget callback may be garbage-collected
# before it runs.
_callback_tasks: set = set()

async def handle_group(model: str, metas: List[dict]):
    """Process a collected group of prefill requests for same model on local node.

    Picks the least-loaded local GPU, loads the model (prefetch), registers
    model locality in Redis, then runs prefill sequentially per request and
    publishes each result. Every request in *metas* is guaranteed to get a
    result (success or error) written to the result hash.
    """
    req_ids = [m["req_id"] for m in metas]
    log.info(f"[{NODE_ID}] handling group model={model} size={len(metas)} reqs={req_ids}")
    inst = pick_instance_for_job()
    if inst is None:
        # no GPU: fail all
        for m in metas:
            write_result_and_publish(m["req_id"], {"error": "no_local_gpu", "node": NODE_ID})
        return

    instance_load[inst.gpu_id] += len(metas)
    try:
        try:
            # scale_up (this will call native prefetch if file path etc.)
            await inst.scale_up(model, prefetch=True)
        except Exception:
            # FIX: previously a scale_up failure escaped this fire-and-forget
            # task, leaving every request in the group without any result.
            log.exception("scale_up failed for model %s", model)
            for m in metas:
                write_result_and_publish(
                    m.get("req_id", "unknown"),
                    {"error": "scale_up_failed", "node": NODE_ID, "model": model},
                )
            return

        # when prefetch done and vLLM created, register model->node mapping
        mark_model_location(model)

        # run prefill for each request (sequential for simplicity)
        for meta in metas:
            try:
                req_id = meta["req_id"]
                prompt = meta["prompt"]
                tokens, kv = await inst.run_prefill(req_id, prompt, max_tokens=1)
                result = {
                    "req_id": req_id,
                    "node": NODE_ID,
                    "gpu": inst.gpu_id,
                    "model": model,
                    "tokens": tokens,
                    "processed_at": time.time(),
                }
                # callback support: fire-and-forget HTTP POST, but keep a
                # strong reference so the task cannot be GC'd mid-flight.
                if meta.get("callback_url"):
                    task = asyncio.create_task(
                        _http_post_callback(meta["callback_url"], result))
                    _callback_tasks.add(task)
                    task.add_done_callback(_callback_tasks.discard)
                write_result_and_publish(req_id, result)
            except Exception:
                log.exception("prefill failed for meta %s", meta)
                write_result_and_publish(meta.get("req_id", "unknown"), {"error": "prefill_failed"})
    finally:
        # always release the load we reserved, even on failure
        instance_load[inst.gpu_id] -= len(metas)

async def _http_post_callback(url: str, payload: dict):
    """Best-effort POST of *payload* as JSON to *url*; errors are logged, never raised."""
    # local import: aiohttp is only needed when a request carries a callback_url
    import aiohttp
    try:
        # FIX: modern aiohttp rejects a bare numeric timeout; use ClientTimeout.
        timeout = aiohttp.ClientTimeout(total=5)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            # FIX: enter the response context so the connection is released;
            # a bare `await session.post(...)` leaks the response/connection.
            async with session.post(url, json=payload) as resp:
                await resp.read()
    except Exception:
        log.exception("callback failed for %s", url)

# Strong references to spawned group/timer tasks (asyncio holds only weak refs,
# so un-referenced fire-and-forget tasks can be garbage-collected mid-flight).
_bg_tasks: set = set()

def _spawn(coro):
    """create_task with a strong reference kept until the task completes."""
    task = asyncio.create_task(coro)
    _bg_tasks.add(task)
    task.add_done_callback(_bg_tasks.discard)
    return task

async def worker_consume_loop():
    """Main consumer loop: atomically move from queue->processing and append to local group.

    Requests are grouped per model; a group is flushed either when it reaches
    GROUP_SIZE or after PREFILL_TIMEOUT seconds, whichever comes first.
    """
    log.info(f"Worker starting on node={NODE_ID} gpus={LOCAL_GPUS} connecting to {REDIS_URL}")
    register_node_info()
    while True:
        try:
            # FIX: redis-py calls are synchronous/blocking. Calling brpoplpush
            # inline froze the event loop for up to ~10s per iteration, which
            # starved handle_group() and the group-flush timers scheduled below.
            # Run all Redis calls in a worker thread instead (Python 3.9+).
            item = await asyncio.to_thread(
                r.brpoplpush, NODE_QUEUE, PROCESSING_LIST, 5)
            if not item:
                # fallback to global queue
                item = await asyncio.to_thread(
                    r.brpoplpush, GLOBAL_QUEUE, PROCESSING_LIST, 5)
            if not item:
                # idle
                await asyncio.sleep(0.01)
                continue

            meta = json.loads(item)
            model = meta.get("model")
            if model is None:
                # malformed request: ack and continue
                await asyncio.to_thread(r.lrem, PROCESSING_LIST, 0, item)
                continue

            # append to local group
            group = local_groups.setdefault(model, [])
            group.append(meta)
            if len(group) >= GROUP_SIZE:
                # group full: hand off immediately
                to_process = group.copy()
                local_groups[model] = []
                _spawn(handle_group(model, to_process))
            else:
                # schedule a timer to force-flush after PREFILL_TIMEOUT;
                # late timers find an empty group and do nothing
                async def flush_later(m=model):
                    await asyncio.sleep(PREFILL_TIMEOUT)
                    pending = local_groups.get(m, [])
                    if pending:
                        local_groups[m] = []
                        await handle_group(m, pending)
                _spawn(flush_later())
            # remove from processing list (we only used it as temporary; work has started)
            await asyncio.to_thread(r.lrem, PROCESSING_LIST, 0, item)
        except Exception:
            log.exception("worker consume loop error")
            await asyncio.sleep(1.0)

async def decoding_daemon():
    """Placeholder for a local decode loop; intentionally idle for now.

    A production implementation would aggregate pending decode requests into
    batches and drive inst.run_decode_turn() on the owning GPUInstance.
    """
    poll_interval = 1.0
    while True:
        # Nothing to do yet; just yield control periodically.
        await asyncio.sleep(poll_interval)

def main():
    """Entry point: run the consumer loop and decode daemon until interrupted."""
    async def _run():
        # Run both long-lived loops concurrently; gather keeps the process
        # alive and propagates the first fatal exception from either task.
        await asyncio.gather(worker_consume_loop(), decoding_daemon())

    try:
        # FIX: asyncio.get_event_loop() + run_forever() outside a running loop
        # is deprecated (and an error on modern Python); asyncio.run() owns
        # the loop lifecycle, including cleanup on exit.
        asyncio.run(_run())
    except KeyboardInterrupt:
        log.info("worker interrupted, shutting down")

if __name__ == "__main__":
    main()
