"""
Z-Image-Turbo 模型加载器
支持昇腾910B NPU加速
"""
import os
import torch
import logging
from typing import Optional
import gc

# 条件导入torch_npu
try:
    import torch_npu
    NPU_AVAILABLE = True
except ImportError:
    NPU_AVAILABLE = False

logger = logging.getLogger(__name__)

# NPU device configuration: read from the NPU_DEVICE_ID environment
# variable, defaulting to device 0 (previously hard-coded despite the
# comment claiming it was env-configurable).
NPU_DEVICE_ID = int(os.getenv("NPU_DEVICE_ID", "0"))
NPU_NAME = f"npu:{NPU_DEVICE_ID}"
# Process-wide guard so torch.npu.set_device() is attempted at most once.
_npu_initialized = False

def init_npu_once():
    """Initialize the Ascend NPU device context, at most once per process.

    Returns:
        bool: True when the NPU context is (already) set up, False when
        initialization failed.
    """
    global _npu_initialized
    if _npu_initialized:
        return True

    try:
        import torch_npu  # noqa: F401 -- imported for its side effects
        torch.npu.set_device(NPU_NAME)
    except Exception as e:
        # Ascend error 100002 / "Repeated initialization" means another
        # component already set the device -- treat that as success.
        message = str(e)
        if "Repeated initialization" not in message and "100002" not in message:
            logger.warning(f"Failed to init NPU: {e}")
            return False
        _npu_initialized = True
        return True

    _npu_initialized = True
    logger.info(f"NPU initialized: {NPU_NAME}")
    return True


def ensure_npu_context():
    """Ensure the NPU device context has been set for this process.

    Thin, idempotent wrapper around init_npu_once(); safe to call
    repeatedly. Previously the boolean result was discarded — it is now
    propagated so callers can check success (backward-compatible).

    Returns:
        bool: True if the NPU context is set up, False otherwise.
    """
    return init_npu_once()


class ZImageTurboModelLoader:
    """Loader for the Z-Image-Turbo diffusion pipeline.

    Resolves the compute device (Ascend NPU > CUDA > CPU), loads the
    diffusers ZImagePipeline from a local path or the HuggingFace Hub,
    and applies optional optimizations (CPU offload, attention backend
    selection, torch.compile).
    """

    def __init__(
        self,
        model_id: str = "Tongyi-MAI/Z-Image-Turbo",
        cache_dir: Optional[str] = None,
        local_model_path: Optional[str] = None
    ):
        """
        Args:
            model_id: HuggingFace model identifier, used when no local
                model path is configured.
            cache_dir: Model cache directory; falls back to the
                MODEL_CACHE_DIR env var, then "/app/models".
            local_model_path: Local model directory; falls back to the
                MODEL_PATH env var.
        """
        self.model_id = model_id
        self.cache_dir = cache_dir or os.getenv("MODEL_CACHE_DIR", "/app/models")
        self.local_model_path = local_model_path or os.getenv("MODEL_PATH", None)
        # Both are populated by load_model().
        self.device = None
        self.pipe = None

    def _get_device(self):
        """Pick the best available compute device: NPU > CUDA > CPU."""
        if NPU_AVAILABLE and hasattr(torch, 'npu') and torch.npu.is_available():
            ensure_npu_context()
            device = NPU_NAME
            logger.info("使用昇腾NPU加速")
        elif torch.cuda.is_available():
            device = "cuda"
            logger.info("使用CUDA加速")
        else:
            device = "cpu"
            logger.info("使用CPU")
        return device

    def _resolve_dtype(self, torch_dtype: Optional[torch.dtype]) -> torch.dtype:
        """Choose a dtype for the current device when none was given.

        NPU honours the USE_BF16 env var (default true); CUDA uses
        bfloat16; CPU falls back to float32.
        """
        if torch_dtype is not None:
            return torch_dtype
        device = str(self.device)
        if 'npu' in device:
            use_bf16 = os.getenv("USE_BF16", "true").lower() == "true"
            return torch.bfloat16 if use_bf16 else torch.float16
        if 'cuda' in device:
            return torch.bfloat16
        return torch.float32

    def _set_attention_backend(self, attention_backend: str) -> None:
        """Configure the transformer attention backend (best effort)."""
        if not hasattr(self.pipe, 'transformer'):
            return
        # Map requested backend name -> (diffusers backend id, log label).
        known = {
            "flash": ("flash", "Flash Attention 2"),
            "_flash_3": ("_flash_3", "Flash Attention 3"),
        }
        if attention_backend in known:
            backend_id, label = known[attention_backend]
            try:
                self.pipe.transformer.set_attention_backend(backend_id)
                logger.info(f"Using {label}")
            except Exception as e:
                logger.warning(f"Failed to set {label}: {e}")
        else:
            logger.info("Using SDPA (default)")

    def _maybe_compile(self) -> None:
        """Compile the transformer for faster inference (best effort)."""
        if not hasattr(self.pipe, 'transformer'):
            return
        try:
            logger.info("Compiling model (first run will be slower)...")
            self.pipe.transformer.compile()
            logger.info("Model compiled successfully")
        except Exception as e:
            logger.warning(f"Failed to compile model: {e}")

    def _free_cache(self) -> None:
        """Release cached device memory and force a GC pass."""
        device = str(self.device)
        if 'npu' in device:
            torch.npu.empty_cache()
        elif 'cuda' in device:
            torch.cuda.empty_cache()
        gc.collect()

    def load_model(
        self,
        torch_dtype: Optional[torch.dtype] = None,
        enable_cpu_offload: bool = False,
        compile_model: bool = False,
        attention_backend: str = "sdpa"
    ):
        """Load the Z-Image-Turbo pipeline onto the resolved device.

        Args:
            torch_dtype: Override the auto-selected dtype.
            enable_cpu_offload: Use diffusers model CPU offloading instead
                of moving the whole pipeline to the device.
            compile_model: Compile the transformer with torch.compile.
            attention_backend: "flash", "_flash_3", or anything else for
                the SDPA default.

        Raises:
            Exception: Re-raises any failure after logging it with a
                traceback.
        """
        try:
            from diffusers import ZImagePipeline

            self.device = self._get_device()
            torch_dtype = self._resolve_dtype(torch_dtype)
            logger.info(f"Loading model with dtype: {torch_dtype}")

            # Prefer the configured local path; fall back to the Hub id.
            model_path = self.local_model_path or self.model_id
            if self.local_model_path and os.path.exists(self.local_model_path):
                logger.info(f"Loading model from local path: {model_path}")
            else:
                logger.info(f"Loading model from HuggingFace: {model_path}")

            logger.info("Loading ZImagePipeline...")
            self.pipe = ZImagePipeline.from_pretrained(
                model_path,
                torch_dtype=torch_dtype,
                low_cpu_mem_usage=False,
            )

            # Either offload submodules to CPU on demand, or move the
            # whole pipeline to the target device.
            if enable_cpu_offload:
                logger.info("Enabling CPU offloading...")
                self.pipe.enable_model_cpu_offload()
            else:
                self.pipe = self.pipe.to(self.device)

            self._set_attention_backend(attention_backend)
            if compile_model:
                self._maybe_compile()
            self._free_cache()

            logger.info(f"Model loaded successfully on {self.device}, dtype={torch_dtype}")

        except Exception as e:
            # logger.exception records the traceback through the logging
            # system instead of dumping it to stderr via traceback.print_exc().
            logger.exception(f"Failed to load model: {e}")
            raise

    def get_pipeline(self):
        """Return the loaded pipeline.

        Raises:
            ValueError: If load_model() has not been called yet.
        """
        if self.pipe is None:
            raise ValueError("Model not loaded. Call load_model() first.")
        return self.pipe

    def get_device(self):
        """Return the resolved device (None until load_model() runs)."""
        return self.device

