"""Text-to-image generator.

Core implementation of text-to-image generation based on Stable Diffusion."""

import os
import torch
from typing import Optional, Union, List
from PIL import Image
from diffusers import StableDiffusionPipeline
from diffusers.schedulers import DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler

# Performance-monitoring and error-handling utilities (mock import):
# prefer the real project modules, fall back to minimal stand-ins.
try:
    from ..utils.performance import (
        monitor_performance, memory_efficient, GPUManager, 
        MemoryManager, performance_context
    )
    from ..utils.exceptions import (
        ModelLoadError, ImageGenerationError, ValidationError,
        handle_gpu_error, handle_model_error, validate_prompt,
        validate_image_size, create_error_context
    )
except ImportError:
    # The utility modules are unavailable — provide no-op replacements
    # with the same names and call signatures.

    def _passthrough(func):
        """Identity decorator: return the wrapped function unchanged."""
        return func

    def monitor_performance(name):
        """No-op stand-in for the performance-monitor decorator factory."""
        return _passthrough

    def memory_efficient(clear_cache=False):
        """No-op stand-in for the memory-efficiency decorator factory."""
        return _passthrough

    def create_error_context(name):
        """No-op stand-in for the error-context decorator factory."""
        return _passthrough

    # Plain identity decorators in the real modules; same here.
    handle_gpu_error = _passthrough
    handle_model_error = _passthrough

    def validate_prompt(prompt):
        """Reject prompts that are empty or not strings."""
        if not prompt or not isinstance(prompt, str):
            raise ValueError("Invalid prompt")

    def validate_image_size(width, height):
        """Reject non-positive image dimensions."""
        if width <= 0 or height <= 0:
            raise ValueError("Invalid image size")

    class GPUManager:
        """Minimal GPU helper exposing only the interface this module uses."""

        @staticmethod
        def optimize_for_inference():
            # Nothing to tune in the fallback implementation.
            pass

        @staticmethod
        def set_memory_fraction(fraction):
            # Memory capping is unsupported in the fallback implementation.
            pass

        @staticmethod
        def get_optimal_device():
            """Return the current CUDA device string, or 'cpu' if unavailable."""
            if not torch.cuda.is_available():
                return "cpu"
            return f"cuda:{torch.cuda.current_device()}"

    class MemoryManager:
        """Minimal memory helper exposing only the interface this module uses."""

        @staticmethod
        def check_memory_threshold(threshold_mb):
            # Fallback never reports memory pressure.
            return False

        @staticmethod
        def clear_cache():
            """Release cached CUDA memory when a GPU is present."""
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        @staticmethod
        def get_memory_usage():
            # Resident-set size is unknown without the real module.
            return {'rss': 0}

    class performance_context:
        """Context-manager stub that only remembers its label."""

        def __init__(self, name):
            self.name = name

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # Never suppresses exceptions.
            pass

    class _FallbackError(Exception):
        """Shared base for the stand-in exceptions: message plus optional details."""

        def __init__(self, message, details=None):
            super().__init__(message)
            self.details = details

    class ModelLoadError(_FallbackError):
        """Raised when the diffusion model cannot be loaded."""

    class ImageGenerationError(_FallbackError):
        """Raised when image generation fails."""

    class ValidationError(_FallbackError):
        """Raised when a generation parameter fails validation."""

class TextToImage:
    """Text-to-image generator.

    Converts text descriptions into images using a Stable Diffusion model.
    """

    @create_error_context("TextToImage.__init__")
    def __init__(
        self,
        model_id: str = "runwayml/stable-diffusion-v1-5",
        device: Optional[str] = None,
        torch_dtype: torch.dtype = torch.float16,
        use_safetensors: bool = True
    ):
        """Initialize the text-to-image generator.

        Args:
            model_id: Hugging Face model ID or local model path.
            device: Compute device ('cuda', 'cpu', 'mps'); auto-detected
                when None.
            torch_dtype: Tensor dtype; float16 by default to save VRAM.
            use_safetensors: Whether to load weights in safetensors format.
        """
        self.model_id = model_id
        self.device = device or self._get_device()
        # Force float32 on CPU — float16 inference on CPU raises dtype errors.
        if self.device == 'cpu':
            self.torch_dtype = torch.float32
        else:
            self.torch_dtype = torch_dtype
        self.use_safetensors = use_safetensors

        # Tune GPU settings; startswith covers ordinal forms like "cuda:0".
        if self.device.startswith('cuda'):
            GPUManager.optimize_for_inference()
            GPUManager.set_memory_fraction(0.8)

        # Load the diffusion pipeline eagerly.
        self.pipeline = None
        self._load_pipeline()

    def _get_device(self) -> str:
        """Auto-detect the best available compute device."""
        optimal_device = GPUManager.get_optimal_device()

        if optimal_device != 'cpu':
            print(f"使用最优GPU设备: {optimal_device}")
        else:
            print("使用CPU (GPU不可用或最优)")

        return optimal_device

    @handle_model_error
    @monitor_performance("model_loading")
    @memory_efficient(clear_cache=True)
    def _load_pipeline(self):
        """Load the Stable Diffusion pipeline onto the configured device.

        Raises:
            ModelLoadError: If the model cannot be loaded.
        """
        try:
            print(f"正在加载模型: {self.model_id}")
            print(f"使用设备: {self.device}")

            with performance_context(f"load_model_{self.model_id}"):
                # Free cached memory first if usage is already high.
                if MemoryManager.check_memory_threshold(2000):  # 2GB threshold
                    print("检测到高内存使用，正在清理缓存")
                    MemoryManager.clear_cache()

                # Load the pipeline.
                self.pipeline = StableDiffusionPipeline.from_pretrained(
                    self.model_id,
                    torch_dtype=self.torch_dtype,
                    use_safetensors=self.use_safetensors,
                    safety_checker=None,  # Optional: disable safety checker to save memory
                    requires_safety_checker=False
                )

                # Move to the target device.
                self.pipeline = self.pipeline.to(self.device)

                # Enable memory optimizations on any CUDA device.
                # FIX: was `== "cuda"`, which never matched "cuda:0"-style
                # strings produced by GPUManager.get_optimal_device().
                if self.device.startswith("cuda"):
                    self.pipeline.enable_attention_slicing()
                    # If VRAM is still insufficient, consider enabling:
                    # self.pipeline.enable_sequential_cpu_offload()
                    # self.pipeline.enable_model_cpu_offload()

            print("模型加载完成！")

        except Exception as e:
            print(f"模型加载失败: {e}")
            # Chain the original exception for easier debugging.
            raise ModelLoadError(
                f"Failed to load model {self.model_id}",
                details={'model_id': self.model_id, 'device': self.device}
            ) from e

    @handle_gpu_error
    @monitor_performance("image_generation")
    @memory_efficient(clear_cache=False)
    @create_error_context("image_generation")
    def generate(
        self,
        prompt: str,
        negative_prompt: Optional[str] = None,
        width: int = 512,
        height: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        seed: Optional[int] = None,
        num_images: int = 1
    ) -> Union[Image.Image, List[Image.Image]]:
        """Generate image(s) from a text prompt.

        Args:
            prompt: Positive prompt describing the desired image.
            negative_prompt: Negative prompt (features to avoid).
            width: Image width in pixels.
            height: Image height in pixels.
            num_inference_steps: Number of denoising steps.
            guidance_scale: Classifier-free guidance strength.
            seed: Random seed for reproducible results.
            num_images: Number of images to generate (1-10).

        Returns:
            A single PIL image when num_images == 1, otherwise a list of images.

        Raises:
            ImageGenerationError: If the pipeline is not loaded or inference fails.
            ValidationError: If num_images is outside [1, 10].
        """
        if self.pipeline is None:
            raise ImageGenerationError("模型未加载，请先调用 _load_pipeline()")

        # Validate inputs before touching the GPU.
        validate_prompt(prompt)
        validate_image_size(width, height)

        if num_images < 1 or num_images > 10:
            raise ValidationError(
                f"num_images must be between 1 and 10, got {num_images}",
                details={'num_images': num_images}
            )

        # Seed both CPU and CUDA RNGs for reproducibility.
        if seed is not None:
            torch.manual_seed(seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(seed)

        generation_kwargs = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            "num_images_per_prompt": num_images
        }

        print(f"正在生成图片...")
        print(f"提示词: {prompt}")
        print(f"参数: {width}x{height}, steps={num_inference_steps}, scale={guidance_scale}")

        try:
            with performance_context(f"generate_{num_images}_images"):
                # Snapshot memory usage to report the generation delta below.
                initial_memory = MemoryManager.get_memory_usage()

                if self.device == 'cpu':
                    # No autocast on CPU: the pipeline already runs in float32.
                    result = self.pipeline(**generation_kwargs)
                else:
                    # FIX: torch.autocast expects a bare device *type*
                    # ("cuda"), not an ordinal-qualified string ("cuda:0").
                    device_type = self.device.split(':', 1)[0]
                    with torch.autocast(device_type):
                        result = self.pipeline(**generation_kwargs)

                images = result.images

                final_memory = MemoryManager.get_memory_usage()
                memory_diff = (final_memory['rss'] - initial_memory['rss']) / (1024 * 1024)
                print(f"生成过程中内存使用: {memory_diff:+.2f}MB")

            print(f"生成完成！共生成 {len(images)} 张图片")

            # Unwrap to a single image when only one was requested.
            return images[0] if num_images == 1 else images

        except Exception as e:
            print(f"图片生成失败: {e}")
            raise ImageGenerationError(
                f"Failed to generate image: {str(e)}",
                details={
                    'prompt': prompt[:100],
                    'width': width,
                    'height': height,
                    'num_images': num_images
                }
            ) from e

    def save_image(
        self,
        image: Image.Image,
        output_path: str,
        quality: int = 95
    ):
        """Save an image to disk, creating parent directories as needed.

        Args:
            image: PIL image object.
            output_path: Destination path; format is inferred from the extension.
            quality: JPEG quality (1-100); ignored for other formats.
        """
        try:
            # FIX: only create directories when the path has a parent —
            # os.makedirs("") raises FileNotFoundError for bare filenames.
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)

            # JPEG needs an explicit quality; other formats use PIL defaults.
            if output_path.lower().endswith(('.jpg', '.jpeg')):
                image.save(output_path, "JPEG", quality=quality)
            else:
                image.save(output_path)

            print(f"图片已保存到: {output_path}")

        except Exception as e:
            print(f"图片保存失败: {e}")
            raise

    def set_scheduler(self, scheduler_type: str = "ddim"):
        """Swap the pipeline's noise scheduler.

        Args:
            scheduler_type: Scheduler type ('ddim', 'pndm', 'lms').

        Raises:
            RuntimeError: If the pipeline is not loaded.
            ValueError: If scheduler_type is not supported.
        """
        if self.pipeline is None:
            raise RuntimeError("模型未加载")

        scheduler_map = {
            "ddim": DDIMScheduler,
            "pndm": PNDMScheduler,
            "lms": LMSDiscreteScheduler
        }

        if scheduler_type not in scheduler_map:
            raise ValueError(f"不支持的调度器类型: {scheduler_type}")

        # Rebuild the scheduler from the current scheduler's config so the
        # noise schedule parameters carry over.
        scheduler_class = scheduler_map[scheduler_type]
        self.pipeline.scheduler = scheduler_class.from_config(
            self.pipeline.scheduler.config
        )

        print(f"调度器已设置为: {scheduler_type}")

    def get_model_info(self) -> dict:
        """Return a summary of the current model configuration."""
        return {
            "model_id": self.model_id,
            "device": self.device,
            "torch_dtype": str(self.torch_dtype),
            "use_safetensors": self.use_safetensors,
            "pipeline_loaded": self.pipeline is not None
        }

    def clear_cache(self):
        """Release cached GPU memory, if a GPU is available."""
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            print("GPU缓存已清理")