import heapq
import time
from dataclasses import dataclass
from typing import Dict, List, Optional

import torch

from my_vllm.core.block_manager import BlockAllocator
from my_vllm.core.sequence import Sequence


@dataclass
class SequenceGroup:
    """A group of sequences that is scheduled as a single unit.

    Instances are orderable (via ``__lt__``) so they can live in a
    min-heap: a numerically higher ``priority`` sorts first, and groups
    of equal priority are ordered by earlier ``arrival_time``.
    """
    sequences: List[Sequence]
    priority: int = 0
    arrival_time: float = 0.0

    def __lt__(self, other):
        # Equal priority: the earlier arrival wins the tie; otherwise
        # the larger priority value comes first in the heap.
        if self.priority == other.priority:
            return self.arrival_time < other.arrival_time
        return self.priority > other.priority


class Scheduler:
    """Sequence scheduler.

    Admits waiting sequence groups into the running set subject to
    ``max_num_seqs`` / ``max_num_batched_tokens`` limits, manages their
    KV-cache block allocations, and prepares model-input batches.
    """

    def __init__(self, config):
        """
        Args:
            config: configuration object; must expose ``num_gpu_blocks``,
                ``block_size``, ``max_num_seqs`` and
                ``max_num_batched_tokens``.
        """
        self.config = config
        # Min-heap of SequenceGroup, ordered by SequenceGroup.__lt__
        # (higher priority first, then earlier arrival).
        self.waiting: List[SequenceGroup] = []
        # request_id -> SequenceGroup for every currently-running sequence.
        self.running: Dict[str, SequenceGroup] = {}
        self.block_allocator = BlockAllocator(
            config.num_gpu_blocks, config.block_size
        )

    def add_sequence(self, sequence: Sequence):
        """Wrap *sequence* in a SequenceGroup and enqueue it for scheduling.

        Stamps the group with a monotonic arrival time so that groups of
        equal priority are served FIFO (previously every group kept the
        default ``arrival_time=0.0``, making tie-breaking arbitrary).
        """
        seq_group = SequenceGroup([sequence], arrival_time=time.monotonic())
        heapq.heappush(self.waiting, seq_group)

    def schedule(self) -> Optional[Dict]:
        """Run one scheduling step.

        Returns:
            A batch dict (see ``_prepare_batch``) if anything is running,
            otherwise ``None``.
        """
        # 1. Drop finished sequences and release their blocks.
        self._remove_finished_sequences()

        # 2. Pick new groups from the waiting queue.
        new_sequences = self._select_new_sequences()

        # 3. Allocate KV-cache blocks for the selected groups.
        if new_sequences:
            if self._allocate_resources(new_sequences):
                # Move every sequence of every admitted group to running.
                for seq_group in new_sequences:
                    for seq in seq_group.sequences:
                        self.running[seq.request_id] = seq_group
            else:
                # Allocation failed: requeue the groups so they are retried
                # later (the original silently dropped them here).
                for seq_group in new_sequences:
                    heapq.heappush(self.waiting, seq_group)

        # 4. Build the model-input batch.
        batch = self._prepare_batch()
        return batch if batch else None

    def _remove_finished_sequences(self):
        """Remove groups whose sequences have all finished; free their blocks."""
        finished_ids = [
            req_id
            for req_id, seq_group in self.running.items()
            if all(seq.is_finished() for seq in seq_group.sequences)
        ]
        for req_id in finished_ids:
            seq_group = self.running.pop(req_id)
            for seq in seq_group.sequences:
                self._free_sequence_blocks(seq)

    def _free_sequence_blocks(self, seq: Sequence):
        """Free and clear *seq*'s block table (no-op if it has none)."""
        if seq.block_table:
            blocks = [self.block_allocator.allocated_blocks[block_id]
                      for block_id in seq.block_table]
            self.block_allocator.free(blocks)
            seq.block_table = []

    def _select_new_sequences(self) -> List[SequenceGroup]:
        """Pop waiting groups until a batch limit would be exceeded.

        Stops when either ``max_num_seqs`` or ``max_num_batched_tokens``
        would be crossed; a group that does not fit the token budget is
        pushed back onto the heap.
        """
        selected: List[SequenceGroup] = []
        total_tokens = 0

        # Count the groups already chosen in this step as well — the
        # original only checked len(self.running), so a single step could
        # overshoot max_num_seqs.
        while (self.waiting
               and len(self.running) + len(selected) < self.config.max_num_seqs):
            seq_group = heapq.heappop(self.waiting)

            seq_len = sum(seq.get_len() for seq in seq_group.sequences)
            if total_tokens + seq_len > self.config.max_num_batched_tokens:
                # Does not fit this batch: put it back and stop selecting.
                heapq.heappush(self.waiting, seq_group)
                break

            selected.append(seq_group)
            total_tokens += seq_len

        return selected

    def _allocate_resources(self, sequences: List[SequenceGroup]) -> bool:
        """Allocate blocks for every sequence; all-or-nothing.

        Returns:
            True on success. On ``RuntimeError`` (allocator out of blocks)
            rolls back only the allocations made by this call and returns
            False (the original rollback also freed any pre-existing
            block tables).
        """
        newly_allocated: List[Sequence] = []
        try:
            for seq_group in sequences:
                for seq in seq_group.sequences:
                    blocks = self.block_allocator.allocate(seq.get_len())
                    seq.block_table = [block.block_id for block in blocks]
                    newly_allocated.append(seq)
            return True
        except RuntimeError:
            # Out of free blocks: undo this call's allocations only.
            for seq in newly_allocated:
                self._free_sequence_blocks(seq)
            return False

    def _prepare_batch(self) -> Optional[Dict]:
        """Assemble the model-input batch from all running sequences.

        Returns:
            ``None`` when nothing is running; otherwise a dict with
            'sequences', 'input_ids' (1-D concat of all token ids),
            'block_tables' (2-D, zero-padded) and 'seq_lens'.
        """
        if not self.running:
            return None

        sequences: List[Sequence] = []
        for seq_group in self.running.values():
            sequences.extend(seq_group.sequences)

        # Block tables are ragged; torch.tensor() on ragged nested lists
        # raises, so pad every table to the longest one. Pad id 0 is
        # assumed to be masked out downstream via seq_lens — TODO confirm.
        max_blocks = max(len(seq.block_table) for seq in sequences)
        padded_tables = [
            seq.block_table + [0] * (max_blocks - len(seq.block_table))
            for seq in sequences
        ]

        return {
            'sequences': sequences,
            'input_ids': torch.cat(
                [torch.tensor(seq.token_ids) for seq in sequences]),
            'block_tables': torch.tensor(padded_tables),
            'seq_lens': torch.tensor([seq.get_len() for seq in sequences]),
        }