import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import List, Dict, Optional
import time


class SimpleLLMEngine:
    """Simplified LLM inference engine.

    Wraps a HuggingFace causal LM with a FIFO request queue and a
    step-based, token-by-token batch loop (``add_request`` + ``step``),
    plus a one-shot ``generate`` convenience method.
    """

    def __init__(self, model_name: str, max_batch_size: int = 4,
                 device: str = "cuda"):
        """Load the model and tokenizer and initialize request state.

        Args:
            model_name: HuggingFace model id or local checkpoint path.
            max_batch_size: Maximum number of concurrently active requests.
            device: Torch device string. Defaults to "cuda" to stay
                backward-compatible with the previous hard-coded ``.cuda()``.
        """
        self.model_name = model_name
        self.max_batch_size = max_batch_size
        self.device = device

        # Load model and tokenizer; fp16 halves memory for inference.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16
        ).to(self.device)

        self.model.eval()

        # Pending requests (FIFO) and requests currently being decoded.
        self.request_queue: List[Dict] = []
        self.active_requests: Dict[str, Dict] = {}
        # Monotonic id counter. The old code used len(request_queue),
        # which shrinks as requests are scheduled, so ids could collide.
        self._request_counter = 0

    def add_request(self, prompt: str, max_tokens: int = 100) -> str:
        """Queue a prompt for generation and return its unique request id."""
        # BUG FIX: ids are taken from a monotonic counter instead of the
        # current queue length, so they stay unique after requests are
        # popped by step().
        request_id = f"req_{self._request_counter}"
        self._request_counter += 1

        request = {
            'id': request_id,
            'prompt': prompt,
            'max_tokens': max_tokens,
            'generated_tokens': [],
            'generated_text': '',
            'start_time': time.time()
        }

        self.request_queue.append(request)
        return request_id

    def generate(self, prompt: str, max_tokens: int = 100) -> str:
        """One-shot generation: return up to ``max_tokens`` of new text."""
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                do_sample=True,
                temperature=0.7,
                # Silences the HF warning when the tokenizer defines no
                # pad token (common for decoder-only models).
                pad_token_id=self.tokenizer.eos_token_id
            )

        # Slice off the prompt tokens so only the continuation is decoded.
        generated_text = self.tokenizer.decode(
            outputs[0][inputs.input_ids.shape[1]:],
            skip_special_tokens=True
        )

        return generated_text

    def step(self) -> Dict[str, str]:
        """Run one scheduling + decoding step.

        Admits queued requests into the active set (up to
        ``max_batch_size``), advances each active request by one token,
        and returns a mapping of request id -> generated text for the
        requests that finished during this step.
        """
        results: Dict[str, str] = {}

        # Admit queued requests while there is batch capacity.
        while self.request_queue and len(self.active_requests) < self.max_batch_size:
            request = self.request_queue.pop(0)
            self.active_requests[request['id']] = request

        # Advance all active requests by one decode step.
        if self.active_requests:
            results.update(self._process_batch())

        return results

    def _process_batch(self) -> Dict[str, str]:
        """Advance every active request by one token; collect finished ones."""
        results = {}
        eos_id = self.tokenizer.eos_token_id

        # Iterate over a snapshot since finished requests are deleted.
        for req_id, request in list(self.active_requests.items()):
            if len(request['generated_tokens']) >= request['max_tokens']:
                # Token budget exhausted: request is finished.
                results[req_id] = request['generated_text']
                del self.active_requests[req_id]
                continue

            next_token = self._generate_next_token(request)

            # BUG FIX: stop when the model emits end-of-sequence instead
            # of sampling past EOS until max_tokens is reached.
            if eos_id is not None and next_token == eos_id:
                results[req_id] = request['generated_text']
                del self.active_requests[req_id]
                continue

            request['generated_tokens'].append(next_token)
            request['generated_text'] += self.tokenizer.decode([next_token])

        return results

    def _generate_next_token(self, request: Dict) -> int:
        """Run one forward pass and sample the next token for a request."""
        # PERF FIX: encode the prompt once and cache it on the request;
        # the old code re-tokenized the prompt on every decode step.
        # Lazy so externally constructed request dicts still work.
        if 'prompt_tokens' not in request:
            request['prompt_tokens'] = self.tokenizer.encode(request['prompt'])
        input_tokens = request['prompt_tokens'] + request['generated_tokens']

        inputs = torch.tensor([input_tokens], device=self.device)

        with torch.no_grad():
            # NOTE: full-sequence forward each step (no KV cache) — fine
            # for a teaching example, but O(n^2) in sequence length.
            outputs = self.model(inputs)
            logits = outputs.logits[0, -1, :]

            # Plain sampling from the softmax distribution.
            probs = torch.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, 1).item()

        return next_token