from collections import deque

from nanovllm.config import Config
from nanovllm.engine.sequence import Sequence, SequenceStatus
from nanovllm.engine.block_manager import BlockManager
from nanovllm.engine.metadata import SpecDecodeMetadata


class Scheduler:
    """Continuous-batching scheduler.

    Admits waiting sequences for prefill, steps running sequences for
    decode (preempting under KV-cache block pressure), and reclaims
    blocks when sequences finish. Also contains speculative-decoding
    bookkeeping for pre-allocated draft blocks in ``postprocess``.
    """

    def __init__(self, config: Config):
        self.max_num_seqs = config.max_num_seqs
        self.max_num_batched_tokens = config.max_num_batched_tokens
        self.eos = config.eos
        self.block_manager = BlockManager(config.num_kvcache_blocks, config.kvcache_block_size)
        # FIFO of sequences awaiting prefill (preempted sequences are
        # pushed back to the *front* of this queue — see preempt()).
        self.waiting: deque[Sequence] = deque()
        # Sequences that hold KV-cache blocks and are decoding.
        self.running: deque[Sequence] = deque()

    def is_finished(self) -> bool:
        """Return True when no sequence is waiting or running."""
        return not self.waiting and not self.running

    def add(self, seq: Sequence) -> None:
        """Enqueue a new sequence for prefill."""
        self.waiting.append(seq)

    def schedule(self) -> tuple[list[Sequence], bool]:
        """Select the next batch to run.

        Returns:
            (seqs, is_prefill): the scheduled sequences and whether this
            batch is a prefill batch. Prefill has strict priority: if any
            waiting sequence can be admitted, only prefill is scheduled.
        """
        # prefill
        scheduled_seqs = []
        num_seqs = 0
        num_batched_tokens = 0
        while self.waiting and num_seqs < self.max_num_seqs:
            seq = self.waiting[0]
            # NOTE(review): the token budget is checked against the full
            # seq.num_tokens but charged net of prefix-cached tokens below —
            # conservative, but intentional-looking; kept as-is.
            if num_batched_tokens + seq.num_tokens > self.max_num_batched_tokens or not self.block_manager.can_allocate(seq):
                break
            num_seqs += 1
            self.block_manager.allocate(seq)
            num_batched_tokens += seq.num_tokens - seq.num_cached_tokens
            seq.status = SequenceStatus.RUNNING
            self.waiting.popleft()
            self.running.append(seq)
            scheduled_seqs.append(seq)
        if scheduled_seqs:
            return scheduled_seqs, True

        # decode
        while self.running and num_seqs < self.max_num_seqs:
            seq = self.running.popleft()
            # Free blocks by preempting the *youngest* running sequences
            # (popped from the back) until this one can append; if nothing
            # else is left to evict, preempt this sequence itself.
            while not self.block_manager.can_append(seq):
                if self.running:
                    self.preempt(self.running.pop())
                else:
                    self.preempt(seq)
                    break
            else:
                # Inner loop exited without break: seq has room to decode.
                num_seqs += 1
                self.block_manager.may_append(seq)
                scheduled_seqs.append(seq)
        # If nothing could be scheduled at all the engine would spin;
        # treat that as an invariant violation.
        assert scheduled_seqs
        # Re-insert in original order at the front so scheduling stays FIFO.
        self.running.extendleft(reversed(scheduled_seqs))
        return scheduled_seqs, False

    def preempt(self, seq: Sequence) -> None:
        """Evict a running sequence: release its blocks and requeue it at
        the head of the waiting queue for re-prefill."""
        seq.status = SequenceStatus.WAITING
        self.block_manager.deallocate(seq)
        self.waiting.appendleft(seq)

    # NOTE(review): `spec_meatadatas` is a typo for `spec_metadatas`; kept
    # to avoid breaking any caller passing it by keyword.
    def postprocess(self, seqs: list[Sequence], token_ids: list[list[int]], spec_meatadatas: list[SpecDecodeMetadata]) -> list[bool]:
        """Apply sampled tokens to each sequence and retire finished ones.

        Args:
            seqs: sequences from the last batch.
            token_ids: accepted token ids per sequence (one list each).
            spec_meatadatas: speculative-decode metadata per sequence.

        Returns:
            A per-sequence flag, True where the sequence finished this step.
        """
        finished = []
        for seq, token_id, spec_metadata in zip(seqs, token_ids, spec_meatadatas):
            seq.append_token(token_id)
            # ref_count == -2 appears to be a sentinel marking a block that was
            # pre-allocated for speculative decode — TODO confirm against
            # BlockManager.
            if self.block_manager.blocks[seq.block_table[-1]].ref_count == -2:    # clear the pre-allocate block for speculative decode if rejection sample
                if seq.num_blocks == len(seq.block_table):  # reserve the pre-allocate block for kv-cache
                    self.block_manager.blocks[seq.block_table[-1]].ref_count = 1
                    # The second-to-last block is now full: compute and record
                    # its prefix hash so future sequences can reuse it.
                    second_last_block = self.block_manager.blocks[seq.block_table[-2]]
                    second_last_token_ids = seq.block(seq.num_blocks-2)
                    # -1 acts as the "no prefix" sentinel for the first block.
                    prefix = self.block_manager.blocks[seq.block_table[-3]].hash if len(seq.block_table) > 2 else -1
                    h = self.block_manager.compute_hash(second_last_token_ids, prefix)
                    second_last_block.update(h, second_last_token_ids)
                    self.block_manager.hash_to_block_id[h] = second_last_block.block_id
                else:   # clear pre-allocate block
                    self.block_manager.blocks[seq.block_table[-1]].ref_count = 0
                    self.block_manager._deallocate_block(seq.block_table[-1])
                    seq.block_table.pop()
            seq.update_spec_status(spec_metadata)
            # Finish on EOS (unless ignored) or on hitting the token budget.
            if (not seq.ignore_eos and self.eos in token_id) or seq.num_completion_tokens >= seq.max_tokens:
                seq.status = SequenceStatus.FINISHED
                self.block_manager.deallocate(seq)
                self.running.remove(seq)
                finished.append(True)
            else:
                finished.append(False)
        return finished
    