"""
BioGPT 包装器
直接集成微软原版 BioGPT 医疗语言模型
"""
import asyncio
from typing import Optional, Dict, Any
from pathlib import Path
import torch
from app.core.logger import app_logger


class BioGPTWrapper:
    """Singleton wrapper around Microsoft's BioGPT medical language model.

    The heavyweight model/tokenizer pair is loaded lazily on first
    instantiation and shared process-wide: ``__new__`` always returns the
    same instance, and ``__init__`` only triggers loading once.
    """

    # Class-level defaults; _initialize_model() assigns instance attributes
    # that shadow these once the model is actually loaded.
    _instance = None   # the one shared instance
    _model = None      # BioGptForCausalLM once loaded
    _tokenizer = None  # BioGptTokenizer once loaded
    _device = None     # "cuda" or "cpu"

    def __new__(cls):
        """Singleton: reuse one instance so the model is never loaded twice."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        """Load the model on first construction only (no-op afterwards)."""
        if self._model is None:
            self._initialize_model()

    def _initialize_model(self):
        """Load the BioGPT model and tokenizer onto the best available device.

        Raises:
            ImportError: if the transformers/torch/sentencepiece stack is missing.
            RuntimeError: if the model cannot be downloaded or loaded.
        """
        try:
            from transformers import BioGptTokenizer, BioGptForCausalLM

            app_logger.info("🧬 开始加载 BioGPT 模型...")

            # Prefer GPU when available; fall back to (slow) CPU inference.
            if torch.cuda.is_available():
                self._device = "cuda"
                gpu_name = torch.cuda.get_device_name(0)
                gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
                app_logger.info(f"✅ 检测到 GPU: {gpu_name} ({gpu_memory:.1f}GB VRAM)")
            else:
                self._device = "cpu"
                app_logger.warning("⚠️  未检测到 GPU，使用 CPU 推理（速度较慢）")

            # Load model and tokenizer (from the local HF cache, downloading
            # if absent).
            model_name = "microsoft/biogpt"
            app_logger.info(f"📥 从 Hugging Face 加载模型: {model_name}")

            try:
                self._tokenizer = BioGptTokenizer.from_pretrained(
                    model_name,
                    local_files_only=False  # allow download on cache miss
                )
                self._model = BioGptForCausalLM.from_pretrained(
                    model_name,
                    local_files_only=False,
                    # FP16 halves VRAM usage on GPU; CPU kernels want FP32.
                    torch_dtype=torch.float16 if self._device == "cuda" else torch.float32,
                    low_cpu_mem_usage=True
                )
            except Exception as download_error:
                app_logger.error(f"❌ 模型下载失败: {download_error}")
                # RuntimeError (still an Exception subclass, so existing
                # callers keep working), chained with `from` so the original
                # traceback is preserved instead of discarded.
                raise RuntimeError(
                    f"BioGPT 模型加载失败。\n"
                    f"原因: {download_error}\n"
                    f"请检查网络连接或手动下载模型到缓存目录。"
                ) from download_error

            # Move to the chosen device and freeze in inference mode.
            self._model.to(self._device)
            self._model.eval()

            # Log basic model stats.
            param_count = sum(p.numel() for p in self._model.parameters()) / 1e9
            app_logger.info("✅ BioGPT 加载成功！")
            app_logger.info(f"   参数量: {param_count:.2f}B")
            app_logger.info(f"   设备: {self._device}")
            app_logger.info(f"   精度: {'FP16' if self._device == 'cuda' else 'FP32'}")

        except ImportError as e:
            app_logger.error(f"❌ 缺少依赖: {e}")
            raise ImportError(
                "请安装 BioGPT 依赖:\n"
                "pip install transformers torch sentencepiece accelerate"
            ) from e
        except Exception as e:
            app_logger.error(f"❌ BioGPT 初始化失败: {e}", exc_info=True)
            raise

    async def generate(
        self,
        prompt: str,
        max_length: int = 1024,
        temperature: float = 0.2,
        top_p: float = 0.9,
        top_k: int = 50,
        num_return_sequences: int = 1
    ) -> str:
        """Generate text asynchronously.

        Runs the blocking HF generation in a worker thread so the asyncio
        event loop is not stalled during inference.

        Args:
            prompt: input prompt text.
            max_length: maximum TOTAL sequence length (prompt + generation),
                as HF ``generate(max_length=...)`` counts the prompt tokens.
            temperature: sampling temperature (lower = more deterministic).
            top_p: nucleus-sampling threshold.
            top_k: top-K sampling cutoff.
            num_return_sequences: number of sampled candidates; only the
                first is returned (see _generate_sync).

        Returns:
            The generated continuation, with the echoed prompt stripped.
        """
        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() here has been deprecated since Python 3.10.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            None,
            self._generate_sync,
            prompt,
            max_length,
            temperature,
            top_p,
            top_k,
            num_return_sequences
        )
        return result

    def _generate_sync(
        self,
        prompt: str,
        max_length: int,
        temperature: float,
        top_p: float,
        top_k: int,
        num_return_sequences: int
    ) -> str:
        """Synchronous generation (see generate() for parameter meanings).

        Returns:
            The decoded first sequence, with the echoed prompt removed
            when the decode reproduces it verbatim.
        """
        try:
            # Tokenize the prompt, truncating overly long inputs.
            inputs = self._tokenizer(
                prompt,
                return_tensors="pt",
                max_length=512,  # cap input length
                truncation=True
            ).to(self._device)

            with torch.no_grad():
                # NOTE: `early_stopping` was removed — it applies only to
                # beam search and merely triggers a transformers warning
                # under sampling (num_beams=1); output is unaffected.
                outputs = self._model.generate(
                    **inputs,
                    max_length=max_length,
                    temperature=temperature,
                    top_p=top_p,
                    top_k=top_k,
                    num_return_sequences=num_return_sequences,
                    do_sample=True,  # enable sampling
                    pad_token_id=self._tokenizer.eos_token_id,
                    eos_token_id=self._tokenizer.eos_token_id
                )

            # Decode only the first sequence, even if several were sampled.
            generated_text = self._tokenizer.decode(
                outputs[0],
                skip_special_tokens=True
            )

            # Return only the continuation, not the echoed prompt (the
            # decode may normalize whitespace, so this is best-effort).
            if generated_text.startswith(prompt):
                generated_text = generated_text[len(prompt):].strip()

            return generated_text

        except Exception as e:
            app_logger.error(f"❌ BioGPT 生成失败: {e}", exc_info=True)
            raise

    def get_model_info(self) -> Dict[str, Any]:
        """Return a status/summary dict describing the loaded model."""
        if self._model is None:
            return {"status": "未加载"}

        param_count = sum(p.numel() for p in self._model.parameters())

        return {
            "status": "已加载",
            "model_name": "microsoft/biogpt",
            "device": self._device,
            "parameters": f"{param_count / 1e9:.2f}B",
            "dtype": str(next(self._model.parameters()).dtype),
            "gpu_available": torch.cuda.is_available(),
            "gpu_name": torch.cuda.get_device_name(0) if torch.cuda.is_available() else None
        }

    def unload_model(self):
        """Unload the model and tokenizer, releasing (GPU) memory."""
        if self._model is not None:
            del self._model
            del self._tokenizer
            self._model = None
            self._tokenizer = None

            # Return freed CUDA blocks to the driver so other processes
            # can see the reclaimed VRAM.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            app_logger.info("🗑️  BioGPT 模型已卸载")


# Module-level handle to the shared wrapper (created lazily by get_biogpt).
biogpt_instance = None


def get_biogpt() -> BioGPTWrapper:
    """Return the process-wide BioGPT wrapper, creating it on first use."""
    global biogpt_instance
    if biogpt_instance is not None:
        return biogpt_instance
    biogpt_instance = BioGPTWrapper()
    return biogpt_instance

