# scheduler.py
import asyncio
import time
from typing import Dict, List, Any
from instance import GPUInstance

# Token-level scheduler (grouped prefill + batched decoding round-robin).
# Implements the core heuristics from Aegaeon (Algorithms 1 and 2); see the paper.

# Cap on the number of requests batched into one prefill group (see submit_request).
MAX_GPSIZE = 8
QMAX = 4.0  # max time quota per turn (seconds)

class PrefillGroup:
    """A batch of pending prefill requests that all target the same model."""

    def __init__(self, model_name):
        # Model served by every request in this group.
        self.model = model_name
        # Queued (req_id, prompt) tuples awaiting prefill execution.
        self.requests = []

    @property
    def size(self):
        """Number of requests currently queued in this group."""
        return len(self.requests)

class PrefillInstanceState:
    """Per-instance scheduling state for one member of the prefill pool."""

    def __init__(self, inst: GPUInstance):
        # The GPU instance this state tracks.
        self.inst = inst
        # FIFO of PrefillGroup objects waiting to run on this instance.
        self.job_queue: List[PrefillGroup] = []

class DecodeBatch:
    """A set of decode-phase requests sharing one model, executed together."""

    def __init__(self, model_name):
        # Model served by every request in this batch.
        self.model = model_name
        # Request dicts ({"req_id": ..., "continuation": ...}) batched for decode.
        self.requests = []

class DecodeInstanceState:
    """Per-instance scheduling state for one member of the decode pool."""

    def __init__(self, inst: GPUInstance):
        # The GPU instance this state tracks.
        self.inst = inst
        # Batches awaiting their next decoding turn on this instance.
        self.work_list: List[DecodeBatch] = []

class TokenLevelScheduler:
    """Token-level scheduler over a pool of prefill and decode GPU instances.

    Implements the core heuristics attributed to Aegaeon in the module header:
      * grouped prefill scheduling (Algorithm 1): submit_request / prefill_loop
      * batched decoding with per-batch time quotas (Algorithm 2): decoding_loop
    """

    def __init__(self, prefill_instances: List[GPUInstance], decode_instances: List[GPUInstance]):
        self.prefill_states = [PrefillInstanceState(i) for i in prefill_instances]
        self.decode_states = [DecodeInstanceState(i) for i in decode_instances]
        # Guards mutation of the shared prefill job queues by concurrent
        # submit_request() callers (was previously created but never acquired).
        self.lock = asyncio.Lock()

    async def submit_request(self, req_id: str, model: str, prompt: str):
        """Entry point for a new request: grouped prefill scheduling (Algorithm 1).

        Joins an existing non-full group for the same model when one exists;
        otherwise opens a new group on the least-loaded prefill instance.

        Raises:
            RuntimeError: if the scheduler was built with no prefill instances.
        """
        if not self.prefill_states:
            # Previously this fell through to `best_inst = None` and crashed
            # with AttributeError; fail with a clear message instead.
            raise RuntimeError("no prefill instances available")
        async with self.lock:
            best_state = None
            min_load = float('inf')
            for st in self.prefill_states:
                # Join an existing same-model group that still has capacity.
                for g in st.job_queue:
                    if g.model == model and g.size < MAX_GPSIZE:
                        g.requests.append((req_id, prompt))
                        return
                # Track the least-loaded instance (pending request count as proxy).
                load = sum(g.size for g in st.job_queue)
                if load < min_load:
                    min_load = load
                    best_state = st
            # No joinable group anywhere: open a new one on the best instance.
            g = PrefillGroup(model)
            g.requests.append((req_id, prompt))
            best_state.job_queue.append(g)

    async def prefill_loop(self, inst_state: PrefillInstanceState):
        """Continuously drain one prefill instance's job queue.

        Pops the front group's next request, runs its prefill with batch
        size 1 (as in the paper), then hands the request off to the decode
        pool via dispatch_to_decode().
        """
        while True:
            if not inst_state.job_queue:
                await asyncio.sleep(0.01)
                continue
            group = inst_state.job_queue[0]
            req_id, prompt = group.requests.pop(0)
            if group.size == 0:
                # Group fully consumed; retire it from the queue.
                inst_state.job_queue.pop(0)
            # Ensure the instance is serving this group's model.
            await inst_state.inst.scale_up(group.model, prefetch=False)
            # Run the prefill (first token); only the KV-cache handle is needed
            # downstream, the token output is unused here.
            _tokens, kvhandle = await inst_state.inst.run_prefill(req_id, prompt, max_tokens=1)
            await self.dispatch_to_decode(req_id, group.model, prompt, kvhandle)
            await asyncio.sleep(0)  # yield to other tasks

    async def dispatch_to_decode(self, req_id, model, prompt, kvhandle):
        """Hand a prefill-completed request to the least-loaded decode instance,
        merging it into an existing same-model batch when one exists.

        Raises:
            RuntimeError: if the scheduler was built with no decode instances.
        """
        if not self.decode_states:
            # min() on an empty pool would raise a bare ValueError.
            raise RuntimeError("no decode instances available")
        best = min(self.decode_states, key=lambda s: sum(len(b.requests) for b in s.work_list))
        for b in best.work_list:
            if b.model == model:
                b.requests.append({"req_id": req_id, "continuation": prompt})
                return
        # No same-model batch on the chosen instance: start a new one.
        nb = DecodeBatch(model)
        nb.requests.append({"req_id": req_id, "continuation": prompt})
        best.work_list.append(nb)

    async def decoding_loop(self, dec_state: DecodeInstanceState):
        """Batched decoding-phase scheduling (Algorithm 2).

        Runs continuous rounds; each round computes a per-batch time quota
        q_i and gives every batch one execution turn of at most that quota.
        """
        while True:
            if not dec_state.work_list:
                await asyncio.sleep(0.01)
                continue
            # Snapshot the work list once so quotas and turns stay aligned
            # even if dispatch_to_decode() appends new batches while we await.
            batches = list(dec_state.work_list)
            # Per-batch n_i = d / t_i approximation; the prototype uses fixed
            # constants where production code would use profiled estimates.
            d = 0.1      # target time-between-tokens (seconds)
            t_k = 0.025  # per-token execution cost estimate
            n_list = [max(1, int(d / t_k)) for _ in batches]
            # Simplified auto-scaling overhead sum (seconds); measured in production.
            c = 0.5 * len(batches)
            inv_n_sum = sum(1.0 / n for n in n_list)
            alpha = max(c / (min(n_list) * QMAX) + inv_n_sum, 0.5)
            denom = alpha - inv_n_sum
            quotas = []
            for n_i in n_list:
                if denom <= 0:
                    # Degenerate case: the 0.5 clamp on alpha can leave
                    # alpha <= inv_n_sum, which previously divided by a
                    # non-positive number. Fall back to the maximum quota.
                    q_i = QMAX
                else:
                    q_i = c / (n_i * denom)
                # Clamp into [0.01, QMAX] as before.
                quotas.append(min(max(q_i, 0.01), QMAX))
            for batch, q in zip(batches, quotas):
                # The batch's model may need a (prefetched) scale-up first.
                await dec_state.inst.scale_up(batch.model, prefetch=True)
                # Approximate how many token steps fit in this time quota.
                tokens_per_sec = 40  # rough throughput estimate
                max_tokens = max(1, int(tokens_per_sec * q))
                # NOTE(review): slicing requests by a token budget conflates
                # tokens with requests; preserved as in the prototype.
                sub_requests = batch.requests[:max_tokens]
                await dec_state.inst.run_decode_turn(sub_requests, q)
                # Very simplified: assume each served request completes one
                # token per turn and is then finished.
                del batch.requests[:len(sub_requests)]
                if not batch.requests and batch in dec_state.work_list:
                    dec_state.work_list.remove(batch)
            await asyncio.sleep(0)  # yield to other tasks

    async def start(self):
        """Spawn one prefill loop per prefill instance and one decoding loop
        per decode instance, then run them all until cancelled."""
        tasks = [asyncio.create_task(self.prefill_loop(st)) for st in self.prefill_states]
        tasks += [asyncio.create_task(self.decoding_loop(ds)) for ds in self.decode_states]
        await asyncio.gather(*tasks)
