"""
Gemma 3 270M 推理引擎 - 使用 PyTorch + 编译优化（类似 LiteRT）
专为小尺寸模型优化，提供最佳推理性能
"""
import torch
import logging
from typing import Dict, List, Optional, Union
import numpy as np

logger = logging.getLogger(__name__)


class Gemma3Inference:
    """Gemma 3 270M inference engine (PyTorch, ``torch.compile``-friendly).

    Wraps a Hugging-Face-style causal LM and its tokenizer, exposing batch
    generation, token streaming, chat, and plain completion entry points.
    The model is assumed to already live on ``device`` (and may have been
    wrapped with ``torch.compile`` by the caller) — TODO confirm at the
    construction site.
    """

    def __init__(self, model, tokenizer, device: str):
        """Store the model, tokenizer, and target device (e.g. ``"cpu"``, ``"cuda:0"``)."""
        self.model = model
        self.tokenizer = tokenizer
        self.device = device

    @staticmethod
    def _apply_repetition_penalty(
        logits: torch.Tensor,
        input_ids: torch.Tensor,
        penalty: Optional[float],
    ) -> torch.Tensor:
        """Apply the standard (CTRL-style) repetition penalty to ``logits``.

        For every token id already present in ``input_ids``, a positive logit
        is divided by ``penalty`` and a negative logit is multiplied by it,
        making repeated tokens less likely. Mirrors the behavior of
        ``transformers.RepetitionPenaltyLogitsProcessor``.

        Args:
            logits: ``(batch, vocab)`` next-token logits; modified in place.
            input_ids: ``(batch, seq)`` token ids seen/generated so far.
            penalty: penalty factor; ``None`` or ``1.0`` is a no-op.

        Returns:
            The (possibly modified) ``logits`` tensor.
        """
        if penalty is None or penalty == 1.0:
            return logits
        scores = torch.gather(logits, 1, input_ids)
        scores = torch.where(scores > 0, scores / penalty, scores * penalty)
        logits.scatter_(1, input_ids, scores)
        return logits

    def _build_chat_prompt(self, messages: List[Dict[str, str]]) -> str:
        """Render chat ``messages`` into a single prompt string.

        Uses the tokenizer's chat template when available; otherwise falls
        back to a simple ``Role: content`` transcript terminated with an
        ``Assistant:`` cue so the model continues as the assistant.
        Messages with unrecognized roles are skipped in the fallback path.
        """
        if hasattr(self.tokenizer, 'apply_chat_template'):
            return self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
        prompt_parts = []
        for msg in messages:
            role = msg.get("role", "user")
            content = msg.get("content", "")
            if role == "user":
                prompt_parts.append(f"User: {content}")
            elif role == "assistant":
                prompt_parts.append(f"Assistant: {content}")
            elif role == "system":
                prompt_parts.append(f"System: {content}")
        prompt_parts.append("Assistant:")
        return "\n".join(prompt_parts)

    def generate(
        self,
        prompt: Union[str, List[str]],
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        do_sample: bool = True,
        stop_sequences: Optional[List[str]] = None,
        **kwargs
    ) -> Dict:
        """Generate text for one prompt or a batch of prompts.

        Args:
            prompt: a single prompt string, or a list of prompts for batching.
            max_new_tokens: maximum number of tokens to generate.
            temperature: sampling temperature (used only when ``do_sample``).
            top_p: nucleus sampling threshold (used only when ``do_sample``).
            top_k: top-k sampling size (used only when ``do_sample``).
            repetition_penalty: penalty factor applied to repeated tokens.
            do_sample: sample from the distribution instead of greedy decoding.
            stop_sequences: strings whose token ids are treated as extra EOS ids.
            **kwargs: additional ``model.generate`` keyword arguments.

        Returns:
            For a string prompt: ``{"generated_text", "full_text", "prompt"}``.
            For a list of prompts: ``{"results": [per-prompt dicts]}``.

        Raises:
            Exception: re-raises any tokenizer/model failure after logging it.
        """
        try:
            if isinstance(prompt, str):
                prompts = [prompt]
            else:
                prompts = prompt

            # Tokenize the batch, truncated to the model's context window.
            inputs = self.tokenizer(
                prompts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=2048
            )

            # Move every tensor onto the target device.
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            # Assemble generation arguments; sampling knobs are only set when
            # sampling is enabled so greedy decoding stays warning-free.
            generation_config = {
                "max_new_tokens": max_new_tokens,
                "temperature": temperature if do_sample else None,
                "top_p": top_p if do_sample else None,
                "top_k": top_k if do_sample else None,
                "repetition_penalty": repetition_penalty,
                "do_sample": do_sample,
                "pad_token_id": self.tokenizer.pad_token_id,
                "eos_token_id": self.tokenizer.eos_token_id,
                **kwargs
            }

            # Drop None entries so generate() falls back to its own defaults.
            generation_config = {k: v for k, v in generation_config.items() if v is not None}

            # Map stop sequences onto extra EOS token ids. transformers'
            # generate() accepts eos_token_id as a list of ids, whereas an
            # unknown "stop_token_ids" kwarg would raise a ValueError.
            if stop_sequences:
                stop_token_ids = []
                for stop_seq in stop_sequences:
                    stop_token_ids.extend(
                        self.tokenizer.encode(stop_seq, add_special_tokens=False)
                    )
                if stop_token_ids:
                    eos = generation_config.get("eos_token_id")
                    eos_list = [eos] if isinstance(eos, int) else list(eos or [])
                    generation_config["eos_token_id"] = eos_list + stop_token_ids

            # Run generation (the model may already be torch.compile-optimized).
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    **generation_config
                )

            # Decode the full sequences (prompt + completion).
            generated_texts = self.tokenizer.batch_decode(
                outputs,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=True
            )

            # Strip the echoed prompt so "generated_text" holds only new text.
            results = []
            for prompt_text, generated_text in zip(prompts, generated_texts):
                if generated_text.startswith(prompt_text):
                    generated_only = generated_text[len(prompt_text):].strip()
                else:
                    generated_only = generated_text.strip()

                results.append({
                    "generated_text": generated_only,
                    "full_text": generated_text,
                    "prompt": prompt_text
                })

            if isinstance(prompt, str):
                return results[0] if results else {"generated_text": "", "full_text": "", "prompt": prompt}
            return {"results": results}

        except Exception as e:
            logger.error(f"生成失败: {str(e)}")
            raise

    def generate_stream(
        self,
        prompt: str,
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        do_sample: bool = True,
        stop_sequences: Optional[List[str]] = None,
        **kwargs
    ):
        """Stream generated text token by token via a manual decoding loop.

        Note: on NPU devices streaming reportedly may trigger AICPU faults —
        callers are expected to fall back to non-streaming generation there
        (not handled here). ``**kwargs`` are accepted for signature parity
        with :meth:`generate` but are ignored by this manual loop.

        Args:
            prompt: input prompt text.
            max_new_tokens: maximum number of tokens to generate.
            temperature: sampling temperature (used only when ``do_sample``).
            top_p: nucleus sampling threshold.
            top_k: top-k sampling size.
            repetition_penalty: penalty factor applied to repeated tokens.
            do_sample: sample from the distribution instead of greedy decoding.
            stop_sequences: strings that terminate generation when produced.
            **kwargs: ignored (kept for interface compatibility).

        Yields:
            str: each newly generated piece of token text.
        """
        try:
            # Tokenize the prompt, truncated to the model's context window.
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=2048
            )

            # Move every tensor onto the target device.
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            # Token ids that should end generation immediately.
            stop_token_ids = []
            if stop_sequences:
                for stop_seq in stop_sequences:
                    stop_token_ids.extend(
                        self.tokenizer.encode(stop_seq, add_special_tokens=False)
                    )

            input_ids = inputs['input_ids']
            attention_mask = inputs.get('attention_mask', None)
            generated_text = ""
            past_key_values = None

            with torch.no_grad():
                for _ in range(max_new_tokens):
                    if past_key_values is None:
                        # First step: run the full prompt through the model.
                        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=True)
                    else:
                        # Subsequent steps: feed only the newest token and
                        # reuse the KV cache for efficiency.
                        outputs = self.model(
                            input_ids=input_ids[:, -1:],
                            past_key_values=past_key_values,
                            use_cache=True
                        )

                    # Next-token logits: (batch=1, vocab).
                    logits = outputs.logits[:, -1, :]

                    # Fix: repetition_penalty used to be accepted but never
                    # applied in this loop.
                    logits = self._apply_repetition_penalty(logits, input_ids, repetition_penalty)

                    if do_sample and temperature is not None and temperature > 0:
                        # Temperature scaling.
                        logits = logits / temperature

                        # Top-k filtering: mask everything below the k-th logit.
                        if top_k is not None and top_k > 0:
                            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
                            logits[indices_to_remove] = float('-inf')

                        # Top-p (nucleus) filtering.
                        if top_p is not None and top_p < 1.0:
                            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                            cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
                            sorted_indices_to_remove = cumulative_probs > top_p
                            # Shift right so the first token crossing the
                            # threshold is still kept.
                            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                            sorted_indices_to_remove[..., 0] = 0
                            indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                            logits[indices_to_remove] = float('-inf')

                        # Sample from the filtered distribution.
                        probs = torch.softmax(logits, dim=-1)
                        next_token_id = torch.multinomial(probs, num_samples=1)
                    else:
                        # Greedy decoding.
                        next_token_id = torch.argmax(logits, dim=-1, keepdim=True)

                    next_token_id = next_token_id.item()

                    # Stop on EOS or any explicit stop token.
                    if next_token_id in stop_token_ids or next_token_id == self.tokenizer.eos_token_id:
                        break

                    # Decode just the new token and accumulate the text.
                    new_token_text = self.tokenizer.decode([next_token_id], skip_special_tokens=True)
                    generated_text += new_token_text

                    # Stop once a stop sequence appears in the running text.
                    if stop_sequences:
                        for stop_seq in stop_sequences:
                            if stop_seq in generated_text:
                                # Discard anything after the stop sequence.
                                generated_text = generated_text.split(stop_seq)[0]
                                # Yield the final token, then stop.
                                if new_token_text:
                                    yield new_token_text
                                return

                    if new_token_text:
                        yield new_token_text

                    # Extend input_ids / attention_mask and carry the KV
                    # cache into the next step.
                    input_ids = torch.cat([input_ids, torch.tensor([[next_token_id]], device=self.device)], dim=1)
                    if attention_mask is not None:
                        attention_mask = torch.cat([attention_mask, torch.ones((1, 1), device=self.device, dtype=attention_mask.dtype)], dim=1)
                    past_key_values = outputs.past_key_values

        except Exception as e:
            logger.error(f"流式生成失败: {str(e)}")
            raise

    def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        do_sample: bool = True,
        stop_sequences: Optional[List[str]] = None,
        **kwargs
    ):
        """Stream a chat completion token by token.

        Note: on NPU devices streaming reportedly may trigger AICPU faults —
        callers are expected to fall back to non-streaming generation there.

        Args:
            messages: list of ``{"role": ..., "content": ...}`` dicts.
            max_new_tokens: maximum number of tokens to generate.
            temperature: sampling temperature.
            top_p: nucleus sampling threshold.
            top_k: top-k sampling size.
            repetition_penalty: penalty factor applied to repeated tokens.
            do_sample: sample instead of greedy decoding.
            stop_sequences: strings that terminate generation when produced.
            **kwargs: forwarded to :meth:`generate_stream`.

        Yields:
            str: each newly generated piece of token text.
        """
        try:
            # Render the conversation into a single prompt string.
            prompt = self._build_chat_prompt(messages)

            # Delegate to the token-streaming generator.
            yield from self.generate_stream(
                prompt=prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                repetition_penalty=repetition_penalty,
                do_sample=do_sample,
                stop_sequences=stop_sequences,
                **kwargs
            )

        except Exception as e:
            logger.error(f"流式对话生成失败: {str(e)}")
            raise

    def chat(
        self,
        messages: List[Dict[str, str]],
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **kwargs
    ) -> Dict:
        """Run a (non-streaming) chat completion.

        Args:
            messages: list of ``{"role": ..., "content": ...}`` dicts.
            max_new_tokens: maximum number of tokens to generate.
            temperature: sampling temperature.
            top_p: nucleus sampling threshold.
            **kwargs: forwarded to :meth:`generate`.

        Returns:
            ``{"message": {"role", "content"}, "usage": {...}}`` with
            token counts computed from the tokenizer.
        """
        try:
            # Render the conversation into a single prompt string.
            prompt = self._build_chat_prompt(messages)

            # Generate the assistant reply.
            result = self.generate(
                prompt=prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                **kwargs
            )

            # Encode each text exactly once for the usage accounting.
            completion_text = result.get("generated_text", "")
            prompt_tokens = len(self.tokenizer.encode(prompt))
            completion_tokens = len(self.tokenizer.encode(completion_text))

            return {
                "message": {
                    "role": "assistant",
                    "content": completion_text
                },
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": prompt_tokens + completion_tokens
                }
            }

        except Exception as e:
            logger.error(f"对话生成失败: {str(e)}")
            raise

    def complete(
        self,
        text: str,
        max_new_tokens: int = 100,
        temperature: float = 0.7,
        **kwargs
    ) -> str:
        """Complete ``text`` and return only the newly generated string.

        Args:
            text: input text to continue.
            max_new_tokens: maximum number of tokens to generate.
            temperature: sampling temperature.
            **kwargs: forwarded to :meth:`generate`.

        Returns:
            The generated continuation (or ``str(result)`` as a fallback).
        """
        try:
            result = self.generate(
                prompt=text,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                **kwargs
            )

            if isinstance(result, dict) and "generated_text" in result:
                return result["generated_text"]
            return str(result)

        except Exception as e:
            logger.error(f"文本补全失败: {str(e)}")
            raise
