#!/usr/bin/env python3
"""
模型下载器
专门用于下载和管理 dialogue_service 所需的所有 AI 模型
确保在服务启动前所有模型都已准备就绪
"""

import os
import sys
import logging
import shutil
import hashlib
import json
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import tempfile
import urllib.request
import urllib.parse
from concurrent.futures import ThreadPoolExecutor, as_completed
import time

# Logging configuration: INFO level with timestamped, leveled messages.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
# Module-level logger shared by ModelDownloader and the CLI entry point.
logger = logging.getLogger(__name__)

class ModelDownloader:
    """Download and manage the AI models required by dialogue_service.

    Models are fetched through FunASR (backed by ModelScope) into a local
    ``models`` directory and verified against a per-model list of required
    files.  A hidden JSON status file inside the models directory records
    what has been downloaded and when.
    """

    def __init__(self, models_dir: Optional[Path] = None):
        """Initialize the downloader.

        Args:
            models_dir: Directory in which models are stored.  Defaults to a
                ``models`` folder next to this file.  Created if missing.
        """
        self.models_dir = models_dir or Path(__file__).parent / "models"
        self.models_dir.mkdir(parents=True, exist_ok=True)

        # Full download configuration per model.
        #   - funasr_model_name / modelscope_repo: remote identifiers
        #   - required_files: integrity checklist used by check_model_integrity
        #   - size_estimate: informational only (parsed in get_download_info)
        #   - priority: download order, lower downloads first
        self.model_configs = {
            "paraformer-zh": {
                "display_name": "中文语音识别模型 (Paraformer)",
                "description": "基于Paraformer的中文语音识别模型，支持实时转录",
                "funasr_model_name": "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                "modelscope_repo": "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                "required_files": [
                    "config.yaml",
                    "model.pt", 
                    "am.mvn",
                    "tokens.json",
                    "README.md"
                ],
                "size_estimate": "850MB",
                "priority": 1
            },
            "fsmn-vad": {
                "display_name": "语音活动检测模型 (FSMN-VAD)",
                "description": "基于FSMN的语音活动检测模型，用于检测语音段落",
                "funasr_model_name": "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                "modelscope_repo": "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                "required_files": [
                    "config.yaml",
                    "model.pt",
                    "am.mvn",
                    "README.md"
                ],
                "size_estimate": "120MB",
                "priority": 2
            },
            "ct-punc": {
                "display_name": "标点符号预测模型 (CT-Punc)",
                "description": "基于CT-Transformer的标点符号预测模型",
                "funasr_model_name": "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                "modelscope_repo": "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                "required_files": [
                    "config.yaml",
                    "model.pt",
                    "tokens.json",
                    "README.md"
                ],
                "size_estimate": "380MB",
                "priority": 3
            },
            "cam++": {
                "display_name": "说话人识别模型 (CAM++)",
                "description": "基于CAM++的说话人识别和分离模型",
                "funasr_model_name": "iic/speech_campplus_sv_zh-cn_16k-common",
                "modelscope_repo": "iic/speech_campplus_sv_zh-cn_16k-common",
                "required_files": [
                    "config.yaml",
                    "campplus_cn_common.bin",
                    "README.md"
                ],
                "size_estimate": "180MB",
                "priority": 4
            }
        }

        # Hidden status file recording per-model download history.
        self.status_file = self.models_dir / ".download_status.json"
        self.download_status = self._load_status()

    def _load_status(self) -> Dict:
        """Load the download-status JSON, or return a fresh default on error."""
        if self.status_file.exists():
            try:
                with open(self.status_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except Exception as e:
                # Corrupt/unreadable status is non-fatal: warn and start fresh.
                logger.warning(f"加载状态文件失败: {e}")

        return {
            "last_update": None,
            "models": {}
        }

    def _save_status(self):
        """Persist the in-memory download status to disk (best effort)."""
        try:
            with open(self.status_file, 'w', encoding='utf-8') as f:
                json.dump(self.download_status, f, indent=2, ensure_ascii=False)
        except Exception as e:
            logger.error(f"保存状态文件失败: {e}")

    def check_model_integrity(self, model_name: str) -> Tuple[bool, List[str]]:
        """Check that every required file of a model exists on disk.

        Args:
            model_name: Key into ``self.model_configs``.

        Returns:
            ``(is_complete, missing)`` where ``missing`` lists the required
            files that are absent (or a single explanatory message when the
            model is unknown or its directory does not exist).
        """
        if model_name not in self.model_configs:
            return False, [f"未知模型: {model_name}"]

        config = self.model_configs[model_name]
        model_dir = self.models_dir / model_name

        if not model_dir.exists():
            return False, ["模型目录不存在"]

        missing_files = [
            required_file
            for required_file in config["required_files"]
            if not (model_dir / required_file).exists()
        ]

        return len(missing_files) == 0, missing_files

    def download_model_via_funasr(self, model_name: str, force: bool = False) -> bool:
        """Download one model through FunASR into ``self.models_dir``.

        Args:
            model_name: Key into ``self.model_configs``.
            force: Re-download even if the model is already complete.

        Returns:
            True on success (model present and complete afterwards).
        """
        if model_name not in self.model_configs:
            logger.error(f"未知模型: {model_name}")
            return False

        config = self.model_configs[model_name]
        model_dir = self.models_dir / model_name

        # Skip the download when the model is already complete on disk.
        if not force:
            is_complete, missing = self.check_model_integrity(model_name)
            if is_complete:
                logger.info(f"✅ 模型 {config['display_name']} 已存在且完整")
                return True
            else:
                logger.info(f"⚠️ 模型 {config['display_name']} 不完整，缺失: {missing}")

        logger.info(f"📥 开始下载模型: {config['display_name']} ({config['size_estimate']})")

        try:
            # Imported lazily so the rest of the tool works without FunASR.
            from funasr import AutoModel

            # Download into a throwaway directory, then copy the files we
            # need into our own layout.
            with tempfile.TemporaryDirectory() as temp_dir:
                logger.info(f"   使用临时目录: {temp_dir}")

                # Redirect the ModelScope cache to the temp dir for this
                # download only.  BUGFIX: the previous value is saved and
                # restored so the process is not left with MODELSCOPE_CACHE
                # pointing at a directory that gets deleted when the
                # TemporaryDirectory context exits.
                # NOTE(review): this env var is process-global, so parallel
                # downloads (max_workers > 1) still race on it — serial
                # downloading remains the recommended mode.
                previous_cache = os.environ.get('MODELSCOPE_CACHE')
                os.environ['MODELSCOPE_CACHE'] = temp_dir
                try:
                    # Instantiating AutoModel triggers the actual download.
                    logger.info(f"   正在下载: {config['funasr_model_name']}")
                    AutoModel(
                        model=config['funasr_model_name'],
                        model_revision="master",
                        disable_update=False,  # allow downloading
                        cache_dir=temp_dir
                    )
                finally:
                    if previous_cache is None:
                        os.environ.pop('MODELSCOPE_CACHE', None)
                    else:
                        os.environ['MODELSCOPE_CACHE'] = previous_cache

                # Locate the directory FunASR actually downloaded into.
                cache_path = Path(temp_dir)
                model_cache_dirs = list(cache_path.rglob("*pytorch*")) + list(cache_path.rglob("*common*"))

                if not model_cache_dirs:
                    # Fall back to any top-level directory in the cache.
                    model_cache_dirs = [d for d in cache_path.iterdir() if d.is_dir()]

                # Pick the first candidate that contains at least one of the
                # required files.
                source_dir = None
                for cache_dir in model_cache_dirs:
                    if any((cache_dir / f).exists() for f in config["required_files"]):
                        source_dir = cache_dir
                        break

                if not source_dir:
                    logger.error("   无法找到下载的模型文件")
                    return False

                logger.info(f"   找到模型文件: {source_dir}")

                model_dir.mkdir(parents=True, exist_ok=True)

                # Copy everything (files and subdirectories) into place
                # before the temp dir is deleted.
                logger.info(f"   复制模型文件到: {model_dir}")
                for item in source_dir.iterdir():
                    if item.is_file():
                        shutil.copy2(item, model_dir)
                    elif item.is_dir():
                        shutil.copytree(item, model_dir / item.name, dirs_exist_ok=True)

            # Re-verify now that the files have been copied.
            is_complete, missing = self.check_model_integrity(model_name)
            if is_complete:
                logger.info(f"✅ 模型 {config['display_name']} 下载完成")

                # Record the successful download in the status file.
                self.download_status["models"][model_name] = {
                    "status": "complete",
                    "timestamp": time.time(),
                    "method": "funasr"
                }
                self._save_status()
                return True
            else:
                logger.error(f"❌ 模型下载后仍不完整，缺失: {missing}")
                return False

        except ImportError:
            logger.error("❌ FunASR 未安装，无法使用自动下载功能")
            logger.info("   请先安装: pip install funasr")
            return False
        except Exception as e:
            logger.error(f"❌ 下载模型失败: {str(e)}")
            return False

    def download_all_models(self, force: bool = False, max_workers: int = 1) -> Dict[str, bool]:
        """Download every configured model, in priority order.

        Args:
            force: Re-download models even when already complete.
            max_workers: Maximum concurrent downloads.  1 (the default,
                recommended) downloads serially and avoids memory pressure
                and the shared MODELSCOPE_CACHE env var.

        Returns:
            Mapping of model name to download success.
        """
        logger.info("🚀 开始下载所有必需的模型...")

        # Lower priority value downloads first.
        models_to_download = sorted(
            self.model_configs.items(),
            key=lambda x: x[1]["priority"]
        )

        results = {}

        if max_workers == 1:
            # Serial download (recommended).  BUGFIX: the progress counter is
            # the position in the sorted list, not config['priority'], which
            # is only correct while priorities happen to be contiguous 1..N.
            for index, (model_name, config) in enumerate(models_to_download, start=1):
                logger.info(f"\n📦 处理模型 {model_name} ({index}/{len(models_to_download)})")
                results[model_name] = self.download_model_via_funasr(model_name, force)
        else:
            # Parallel download.
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_model = {
                    executor.submit(self.download_model_via_funasr, model_name, force): model_name
                    for model_name, _ in models_to_download
                }

                for future in as_completed(future_to_model):
                    model_name = future_to_model[future]
                    try:
                        results[model_name] = future.result()
                    except Exception as e:
                        logger.error(f"下载模型 {model_name} 时出错: {e}")
                        results[model_name] = False

        # Summarize the results.
        successful = sum(1 for success in results.values() if success)
        total = len(results)

        logger.info(f"\n📊 下载完成统计:")
        logger.info(f"   成功: {successful}/{total}")
        logger.info(f"   失败: {total - successful}/{total}")

        for model_name, success in results.items():
            config = self.model_configs[model_name]
            status = "✅ 成功" if success else "❌ 失败"
            logger.info(f"   {config['display_name']}: {status}")

        if successful == total:
            logger.info("🎉 所有模型下载完成！")
            self.download_status["last_update"] = time.time()
            self._save_status()
        else:
            logger.warning(f"⚠️ {total - successful} 个模型下载失败")

        return results

    def verify_all_models(self) -> Dict[str, Tuple[bool, List[str]]]:
        """Verify the on-disk integrity of every configured model.

        Returns:
            Mapping ``{model_name: (is_complete, missing_files)}``.
        """
        logger.info("🔍 验证所有模型完整性...")

        results = {}
        for model_name in self.model_configs:
            is_complete, missing = self.check_model_integrity(model_name)
            results[model_name] = (is_complete, missing)

            config = self.model_configs[model_name]
            if is_complete:
                logger.info(f"✅ {config['display_name']}: 完整")
            else:
                logger.warning(f"❌ {config['display_name']}: 缺失 {missing}")

        complete_count = sum(1 for is_complete, _ in results.values() if is_complete)
        total_count = len(results)

        logger.info(f"\n📊 验证结果: {complete_count}/{total_count} 个模型完整")

        return results

    def get_download_info(self) -> Dict:
        """Collect download information and statistics for all models.

        Returns:
            Dict with the models directory, per-model status, a completion
            count and a rough total size estimate.
        """
        info = {
            "models_dir": str(self.models_dir),
            "total_models": len(self.model_configs),
            "models": {}
        }

        total_size_estimate = 0
        for model_name, config in self.model_configs.items():
            is_complete, missing = self.check_model_integrity(model_name)

            # Parse the human-readable size estimate into MB.  BUGFIX: parse
            # as float first so values such as "1.5MB" do not raise.
            size_str = config["size_estimate"]
            size_mb = 0
            if "MB" in size_str:
                size_mb = int(float(size_str.replace("MB", "")))
            elif "GB" in size_str:
                size_mb = int(float(size_str.replace("GB", "")) * 1024)

            total_size_estimate += size_mb

            info["models"][model_name] = {
                "display_name": config["display_name"],
                "description": config["description"],
                "size_estimate": config["size_estimate"],
                "priority": config["priority"],
                "is_complete": is_complete,
                "missing_files": missing if not is_complete else [],
                "status": self.download_status.get("models", {}).get(model_name, {})
            }

        info["total_size_estimate"] = f"{total_size_estimate}MB ({total_size_estimate/1024:.1f}GB)"
        info["complete_models"] = sum(1 for m in info["models"].values() if m["is_complete"])

        return info

    def clean_incomplete_models(self):
        """Delete the directory of every model that fails the integrity check."""
        logger.info("🧹 清理不完整的模型文件...")

        cleaned = 0
        for model_name in self.model_configs:
            is_complete, missing = self.check_model_integrity(model_name)
            if not is_complete:
                model_dir = self.models_dir / model_name
                if model_dir.exists():
                    try:
                        shutil.rmtree(model_dir)
                        logger.info(f"   清理了不完整的模型: {model_name}")
                        cleaned += 1
                    except Exception as e:
                        logger.error(f"   清理模型 {model_name} 失败: {e}")

        if cleaned > 0:
            logger.info(f"✅ 清理了 {cleaned} 个不完整的模型")
        else:
            logger.info("✅ 没有发现需要清理的模型")

    def print_status(self):
        """Print a human-readable status report to stdout."""
        info = self.get_download_info()

        print("\n" + "="*80)
        print("🤖 模型下载器状态")
        print("="*80)
        print(f"模型目录: {info['models_dir']}")
        print(f"总模型数: {info['total_models']}")
        print(f"完整模型: {info['complete_models']}/{info['total_models']}")
        print(f"预估大小: {info['total_size_estimate']}")

        if self.download_status.get("last_update"):
            last_update = time.strftime("%Y-%m-%d %H:%M:%S", 
                                     time.localtime(self.download_status["last_update"]))
            print(f"最后更新: {last_update}")

        print("\n📋 模型详情:")
        print("-" * 80)

        for model_name, model_info in info["models"].items():
            status = "✅ 完整" if model_info["is_complete"] else "❌ 不完整"
            priority = f"P{model_info['priority']}"
            size = model_info["size_estimate"]

            print(f"{priority:3} | {status:8} | {size:8} | {model_info['display_name']}")
            if not model_info["is_complete"] and model_info["missing_files"]:
                print(f"     缺失文件: {', '.join(model_info['missing_files'])}")

        print("="*80)

def main():
    """Command-line entry point: parse flags and dispatch to the downloader."""
    import argparse

    parser = argparse.ArgumentParser(
        description="模型下载器 - 下载 dialogue_service 所需的所有 AI 模型",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
使用示例:
    python download_models.py --download                    # 下载所有模型
    python download_models.py --verify                      # 验证模型完整性
    python download_models.py --info                        # 显示模型信息
    python download_models.py --download --force            # 强制重新下载
    python download_models.py --clean                       # 清理不完整的模型
        """
    )

    # Boolean switches, registered in display order.
    boolean_flags = (
        ("--download", "下载所有必需的模型"),
        ("--verify", "验证模型完整性"),
        ("--info", "显示模型信息和状态"),
        ("--clean", "清理不完整的模型"),
        ("--force", "强制重新下载（与--download配合使用）"),
    )
    for flag, help_text in boolean_flags:
        parser.add_argument(flag, action="store_true", help=help_text)
    parser.add_argument("--models-dir", type=Path, help="指定模型存储目录")
    parser.add_argument("--max-workers", type=int, default=1, help="最大并发下载数（默认1）")

    args = parser.parse_args()

    downloader = ModelDownloader(args.models_dir)

    # No action requested: just show the current status and exit.
    if not any((args.download, args.verify, args.info, args.clean)):
        downloader.print_status()
        return

    try:
        # Actions may be combined; each runs at most once, in a fixed order.
        if args.info:
            downloader.print_status()
        if args.clean:
            downloader.clean_incomplete_models()
        if args.verify:
            downloader.verify_all_models()
        if args.download:
            downloader.download_all_models(
                force=args.force,
                max_workers=args.max_workers,
            )
    except KeyboardInterrupt:
        logger.info("\n⏹️ 用户中断操作")
        sys.exit(1)
    except Exception as e:
        logger.error(f"❌ 操作失败: {str(e)}")
        sys.exit(1)

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()