"""
AudioFly 模型加载器
使用 CPU 推理
参考: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md
AudioFly 是文本到音频生成模型（LDM架构）
"""
import os
import sys
import torch
import logging
import yaml
from typing import Optional
from modelscope import snapshot_download, Model

logger = logging.getLogger(__name__)


class AudioFlyModelLoader:
    """Loader for the AudioFly text-to-audio model (LDM architecture), CPU-only.

    Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

    Loading procedure (per the upstream README):
      1. Resolve the AudioFly project directory (local path, or ModelScope download).
      2. Add the project directory to ``sys.path`` so the ``ldm`` package imports.
      3. Instantiate the model from ``config/config.yaml`` via
         ``ldm.utils.util.instantiate_from_config``.
      4. Load the checkpoint weights from ``models/ldm/model.ckpt``.
    """

    def __init__(self, model_id: str = "iflytek/AudioFly", cache_dir: Optional[str] = None, local_model_path: Optional[str] = None):
        """Initialize the loader without touching the network or disk.

        Args:
            model_id: ModelScope model id to download when no local path is given.
            cache_dir: Download cache directory; falls back to the
                MODEL_CACHE_DIR env var, then "/app/models".
            local_model_path: Pre-downloaded AudioFly project directory; falls
                back to the LOCAL_MODEL_PATH env var.
        """
        self.model_id = model_id
        self.cache_dir = cache_dir or os.getenv("MODEL_CACHE_DIR", "/app/models")
        self.local_model_path = local_model_path or os.getenv("LOCAL_MODEL_PATH", None)
        self.device: Optional[str] = None   # set by load_model()
        self.model = None                   # set by load_model()
        self.processor = None               # AudioFly uses no processor; stays None

    def _get_device(self) -> str:
        """Return the compute device. AudioFly inference is forced onto the CPU."""
        logger.info("使用CPU推理（AudioFly不支持NPU）")
        return "cpu"

    def _resolve_project_dir(self) -> str:
        """Return the AudioFly project directory, downloading from ModelScope if needed."""
        if self.local_model_path and os.path.exists(self.local_model_path):
            logger.info(f"从本地路径加载AudioFly项目: {self.local_model_path}")
            return self.local_model_path
        logger.info(f"从ModelScope下载模型: {self.model_id}")
        project_dir = snapshot_download(self.model_id, cache_dir=self.cache_dir)
        logger.info(f"模型下载完成: {project_dir}")
        return project_dir

    def _prepare_environment(self, project_dir: str) -> None:
        """Log the project layout, extend sys.path, and hint at missing dependencies."""
        logger.info(f"AudioFly项目目录: {project_dir}")
        if os.path.exists(project_dir):
            files = os.listdir(project_dir)
            logger.info(f"项目目录文件: {files[:10]}...")

        # The ldm package lives inside the project directory, so it must be importable.
        if project_dir not in sys.path:
            sys.path.insert(0, project_dir)
            logger.info(f"已将项目目录添加到PYTHONPATH: {project_dir}")

        # Dependencies should be installed at build/startup time; only hint here
        # (installing from inside the process could hit permission problems).
        requirements_path = os.path.join(project_dir, "requirements.txt")
        if os.path.exists(requirements_path):
            logger.info(f"发现AudioFly项目requirements.txt: {requirements_path}")
            logger.info("请确保已安装AudioFly项目的依赖: pip install -r requirements.txt")

    def _find_config(self, project_dir: str) -> str:
        """Return the first existing config file path; raise FileNotFoundError otherwise."""
        candidates = [
            os.path.join(project_dir, "config", "config.yaml"),
            os.path.join(project_dir, "config.yaml"),
            os.path.join(project_dir, "config", "model_config.yaml"),
        ]
        for path in candidates:
            if os.path.exists(path):
                return path
        raise FileNotFoundError(
            f"未找到配置文件。尝试的路径: {candidates[0]}, "
            f"项目目录: {project_dir}"
        )

    def _find_checkpoint(self, project_dir: str) -> str:
        """Return the first existing checkpoint path; raise FileNotFoundError otherwise."""
        candidates = [
            os.path.join(project_dir, "models", "ldm", "model.ckpt"),
            os.path.join(project_dir, "models", "model.ckpt"),
            os.path.join(project_dir, "checkpoints", "model.ckpt"),
            os.path.join(project_dir, "model.ckpt"),
        ]
        for path in candidates:
            if os.path.exists(path):
                return path
        raise FileNotFoundError(
            f"未找到模型checkpoint。尝试的路径: {candidates[0]}, "
            f"项目目录: {project_dir}"
        )

    def _load_weights(self, checkpoint_path: str) -> None:
        """Load checkpoint weights into self.model (strict=False allows partial match)."""
        logger.info(f"加载模型checkpoint: {checkpoint_path}")
        # NOTE(review): torch.load unpickles arbitrary objects — only safe for
        # trusted checkpoints (this one comes from ModelScope / a local path).
        checkpoint = torch.load(checkpoint_path, map_location="cpu")

        # Lightning-style checkpoints nest weights under "state_dict".
        if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
            state_dict = checkpoint["state_dict"]
        else:
            state_dict = checkpoint

        missing_keys, unexpected_keys = self.model.load_state_dict(
            state_dict, strict=False
        )
        if missing_keys:
            logger.warning(f"加载checkpoint时缺少的键: {missing_keys[:5]}...")
        if unexpected_keys:
            logger.warning(f"加载checkpoint时意外的键: {unexpected_keys[:5]}...")

    def load_model(self) -> bool:
        """Load the AudioFly model following the upstream README.

        Reference: https://ai.gitcode.com/ifly_opensource/AudioFly/blob/main/README.md

        Returns:
            True on success.

        Raises:
            ImportError: if the ``ldm`` package (AudioFly project dependency)
                cannot be imported.
            FileNotFoundError: if no config file or checkpoint is found.
            ValueError: if the config file lacks a "model" section.
        """
        try:
            self.device = self._get_device()

            project_dir = self._resolve_project_dir()
            self._prepare_environment(project_dir)

            try:
                # Import only after project_dir is on sys.path.
                from ldm.utils.util import instantiate_from_config
                logger.info("成功导入 ldm.utils.util")

                config_path = self._find_config(project_dir)
                logger.info(f"加载配置文件: {config_path}")
                # FullLoader (not safe_load): upstream configs may rely on
                # non-basic YAML tags for class targets.
                with open(config_path, "r", encoding="utf-8") as f:
                    configs = yaml.load(f, Loader=yaml.FullLoader)

                if "model" not in configs:
                    raise ValueError("配置文件中未找到 'model' 键")

                logger.info("从配置实例化模型...")
                self.model = instantiate_from_config(configs["model"])
                logger.info("模型实例化成功")

                self._load_weights(self._find_checkpoint(project_dir))
                logger.info("模型权重加载成功")

                # Inference mode on CPU.
                self.model.eval()
                if hasattr(self.model, 'to'):
                    self.model = self.model.to("cpu")
                    logger.info("模型已移动到CPU设备")

                # AudioFly has no separate processor component.
                self.processor = None

                logger.info("AudioFly模型加载成功")
                return True

            except ImportError as e:
                logger.error(f"导入ldm模块失败: {e}")
                logger.error("请确保已安装AudioFly项目的依赖: pip install -r requirements.txt")
                raise
            except Exception as e:
                logger.error(f"加载AudioFly模型失败: {e}")
                raise

        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            raise

    def get_model(self):
        """Return the loaded model; raise RuntimeError if load_model() was not called."""
        if self.model is None:
            raise RuntimeError("模型未加载，请先调用load_model()")
        return self.model

    def get_processor(self):
        """Return the processor (always None for AudioFly)."""
        return self.processor

    def get_device(self) -> Optional[str]:
        """Return the compute device string, or None before load_model()."""
        return self.device


