import time
from typing import Dict, List, Optional
import uuid

from transformers import AutoTokenizer

from my_vllm.core.sequence import Sequence
from my_vllm.scheduler.scheduler import Scheduler
from my_vllm.worker.model_runner import ModelRunner


class LLMEngine:
    """Minimal LLM inference engine.

    Ties together a tokenizer, a scheduler, and a model runner: requests
    are tokenized and queued via ``add_request``, and ``step`` advances
    the engine by one schedule/execute/collect cycle.
    """

    def __init__(self, model_config, cache_config, scheduler_config):
        self.model_config = model_config
        self.cache_config = cache_config
        self.scheduler_config = scheduler_config

        # Core components: tokenizer for encoding prompts, scheduler for
        # batching sequences, model runner for the forward pass.
        self.tokenizer = AutoTokenizer.from_pretrained(model_config.tokenizer)
        self.scheduler = Scheduler(scheduler_config)
        self.model_runner = ModelRunner(model_config)

        # In-flight request bookkeeping, keyed by request id.
        # Entries are removed when their sequence finishes (see
        # _process_outputs) so this dict does not grow without bound.
        self.requests: Dict[str, Dict] = {}

    def add_request(
            self,
            prompt: str,
            sampling_params: Dict,
            request_id: Optional[str] = None
    ) -> str:
        """Tokenize ``prompt``, register it, and enqueue it on the scheduler.

        Args:
            prompt: Raw prompt text.
            sampling_params: Decoding parameters for this request.
            request_id: Optional caller-supplied id; a UUID4 string is
                generated when omitted.

        Returns:
            The id under which the request is tracked.
        """
        if request_id is None:
            request_id = str(uuid.uuid4())

        # Tokenize the prompt text.
        prompt_token_ids = self.tokenizer.encode(prompt)

        # Wrap into a Sequence for the scheduler/model pipeline.
        sequence = Sequence(
            request_id=request_id,
            prompt=prompt,
            prompt_token_ids=prompt_token_ids
        )

        # Track per-request state (timing and accumulated output).
        self.requests[request_id] = {
            'sequence': sequence,
            'sampling_params': sampling_params,
            'start_time': time.time(),
            'output_text': '',
            'output_tokens': []
        }

        # Hand the sequence to the scheduler for batching.
        self.scheduler.add_sequence(sequence)

        return request_id

    def step(self) -> List[Dict]:
        """Run one inference step: schedule, execute, and collect results.

        Returns:
            Result dicts for sequences that finished during this step;
            an empty list when the scheduler has nothing to run.
        """
        batch = self.scheduler.schedule()

        # Nothing schedulable this step.
        if batch is None:
            return []

        # Forward pass over the scheduled batch.
        outputs = self.model_runner.execute_model(batch)

        return self._process_outputs(batch, outputs)

    def _process_outputs(self, batch, outputs) -> List[Dict]:
        """Collect results for finished sequences in ``batch``.

        NOTE(review): simplified — ``outputs`` is currently unused and
        ``finish_reason`` is hard-coded to 'length'. A full implementation
        would append sampled tokens to each sequence and detect EOS here.
        """
        results = []

        for sequence in batch['sequences']:
            if sequence.is_finished():
                result = {
                    'request_id': sequence.request_id,
                    'text': sequence.output_text,
                    'tokens': sequence.output_token_ids,
                    'finish_reason': 'length'
                }
                results.append(result)
                # Fix: drop the finished request from the tracking dict so
                # it does not leak for the engine's lifetime. (No other
                # reader of self.requests is visible in this file; confirm
                # external callers do not query completed requests.)
                self.requests.pop(sequence.request_id, None)

        return results

    def has_unfinished_requests(self) -> bool:
        """Return True while the scheduler has running or waiting sequences."""
        return len(self.scheduler.running) > 0 or len(self.scheduler.waiting) > 0