#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AI模型管理工具
用于手动下载和管理rembg模型文件
"""

import os
import sys
import requests
from pathlib import Path
from typing import Optional, Dict, Tuple
import hashlib
import time
from datetime import datetime


class ModelManager:
    """Manager for rembg AI model files: listing, downloading, updating and removing them."""

    # Download URL and metadata for every supported model.
    # `size_mb` is only an advisory number shown to the user before download.
    MODELS = {
        'u2net': {
            'url': 'https://github.com/danielgatis/rembg/releases/download/v0.0.0/u2net.onnx',
            'filename': 'u2net.onnx',
            'size_mb': 176,
            'description': '通用背景移除模型（推荐）'
        },
        'u2net_human_seg': {
            'url': 'https://github.com/danielgatis/rembg/releases/download/v0.0.0/u2net_human_seg.onnx',
            'filename': 'u2net_human_seg.onnx',
            'size_mb': 176,
            'description': '专门用于人像分割'
        },
        'silueta': {
            'url': 'https://github.com/danielgatis/rembg/releases/download/v0.0.0/silueta.onnx',
            'filename': 'silueta.onnx',
            'size_mb': 43,
            'description': '高精度模型'
        }
    }

    def __init__(self):
        """Resolve the model storage directory and make sure it exists."""
        self.model_dir = self._get_model_directory()
        self._ensure_model_directory()

    def _get_model_directory(self) -> Path:
        """Pick the model storage directory.

        Priority: models bundled into a PyInstaller exe -> local ./models
        folder containing .onnx files (development mode) -> U2NET_HOME
        environment variable -> ~/.u2net (rembg's default location).
        """
        # Running inside a PyInstaller bundle: prefer models shipped with the exe.
        if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
            bundled_models_dir = Path(sys._MEIPASS) / 'models'
            if bundled_models_dir.exists():
                print(f"[模型源] 使用打包的模型文件: {bundled_models_dir}")
                return bundled_models_dir

        # Development mode: a ./models folder that actually contains .onnx files.
        local_models_dir = Path.cwd() / 'models'
        if local_models_dir.exists() and any(local_models_dir.glob('*.onnx')):
            print(f"[模型源] 使用本地模型文件: {local_models_dir}")
            return local_models_dir

        # Explicit override via the U2NET_HOME environment variable.
        u2net_home = os.environ.get('U2NET_HOME')
        if u2net_home:
            return Path(u2net_home)

        # Default: rembg's standard cache directory under the user's home.
        return Path.home() / '.u2net'

    def _ensure_model_directory(self):
        """Create the model directory if needed.

        Falls back to ./.u2net in the current directory (and exports
        U2NET_HOME accordingly) when the home directory is not writable.
        """
        # A bundled (read-only) model directory must not be created/modified.
        if getattr(sys, 'frozen', False) and 'models' in str(self.model_dir):
            print(f"[模型目录] {self.model_dir}")
            return

        try:
            self.model_dir.mkdir(parents=True, exist_ok=True)
            print(f"[模型目录] {self.model_dir}")
        except PermissionError:
            # Home directory not writable: fall back to the current directory.
            self.model_dir = Path.cwd() / '.u2net'
            self.model_dir.mkdir(parents=True, exist_ok=True)
            print(f"[模型目录] 使用当前目录: {self.model_dir}")
            # Point rembg itself at the fallback location.
            os.environ['U2NET_HOME'] = str(self.model_dir)

    def list_models(self) -> Tuple[int, int]:
        """Print the download status of every known model.

        Returns:
            (downloaded_count, total_count)
        """
        print("🤖 AI抠图模型状态检查")
        print("=" * 60)

        total_models = len(self.MODELS)
        downloaded_models = 0

        for model_name, info in self.MODELS.items():
            status = self.check_model_status(model_name)
            if status:
                downloaded_models += 1
                status_text = "✅ 已下载"
                file_path = self.model_dir / info['filename']
                file_size = file_path.stat().st_size / (1024 * 1024) if file_path.exists() else 0
                size_info = f"({file_size:.1f}MB)"
            else:
                status_text = "❌ 未下载"
                size_info = f"(需要 {info['size_mb']}MB)"

            print(f"{model_name:20} | {status_text:10} | {size_info:15}")
            print(f"{'':20} | {info['description']}")
            print()

        print("=" * 60)
        print(f"📊 总计: {downloaded_models}/{total_models} 个模型已下载")

        if downloaded_models == 0:
            print("⚠️  没有找到任何已下载的模型文件")
            print("💡 建议先下载 u2net 模型")
        elif downloaded_models < total_models:
            print(f"💡 还有 {total_models - downloaded_models} 个模型可以下载")
        else:
            print("🎉 所有模型都已下载完成！")

        return downloaded_models, total_models

    def check_model_status(self, model_name: str) -> bool:
        """Return True if the named model exists locally and looks non-trivial."""
        if model_name not in self.MODELS:
            return False

        model_file = self.model_dir / self.MODELS[model_name]['filename']
        # Require at least 1KB so an empty/aborted file does not count.
        return model_file.exists() and model_file.stat().st_size > 1024

    def get_remote_model_info(self, model_name: str) -> Optional[Dict[str, str]]:
        """Fetch remote file metadata (etag / last-modified / content-length).

        Issues a HEAD request with up to 3 attempts; returns None for
        unknown models or when the server cannot be reached.
        """
        if model_name not in self.MODELS:
            return None

        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Some hosts reject requests without a browser-like User-Agent.
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                }

                # HEAD request: headers only, no body download.
                response = requests.head(
                    self.MODELS[model_name]['url'],
                    timeout=15,
                    headers=headers,
                    allow_redirects=True
                )
                response.raise_for_status()

                return {
                    'etag': response.headers.get('etag', '').strip('"'),
                    'last_modified': response.headers.get('last-modified', ''),
                    'content_length': response.headers.get('content-length', '0')
                }
            except requests.exceptions.RequestException:
                if attempt < max_retries - 1:
                    print(f"\r{model_name:20} | 🔄 重试 {attempt + 1}/{max_retries}   |", end='')
                    time.sleep(2)  # back off before retrying
                    continue
                else:
                    print(f"\r{model_name:20} | ⚠️ 网络异常     | 无法访问远程服务器")
                    return None
            except Exception as e:
                print(f"\r{model_name:20} | ❌ 检查失败     | {str(e)[:30]}...")
                return None

        return None

    def get_local_model_info(self, model_name: str) -> Optional[Dict[str, str]]:
        """Return size, mtime and a quick content hash of a downloaded model.

        Returns None when the model is not downloaded or cannot be read.
        """
        if not self.check_model_status(model_name):
            return None

        model_file = self.model_dir / self.MODELS[model_name]['filename']
        try:
            stat_info = model_file.stat()

            # Hash only the first and last 1MB of large files so the check
            # stays fast on ~176MB models; small files are hashed in full.
            with open(model_file, 'rb') as f:
                file_size = stat_info.st_size
                if file_size <= 2 * 1024 * 1024:  # <= 2MB: read everything
                    content = f.read()
                else:
                    start_content = f.read(1024 * 1024)  # first 1MB
                    f.seek(-1024 * 1024, 2)  # jump to 1MB before EOF
                    end_content = f.read(1024 * 1024)  # last 1MB
                    content = start_content + end_content

                file_hash = hashlib.md5(content).hexdigest()

            return {
                'size': str(file_size),
                'mtime': datetime.fromtimestamp(stat_info.st_mtime).isoformat(),
                'hash': file_hash
            }
        except Exception as e:
            print(f"⚠️ 获取本地模型信息失败 ({model_name}): {e}")
            return None

    def check_model_updates(self, model_name: Optional[str] = None) -> Dict[str, bool]:
        """Check whether models need (re-)downloading.

        Args:
            model_name: check only this model; None checks all models.

        Returns:
            Mapping of model name -> True when a download/update is needed.
        """
        models_to_check = [model_name] if model_name else list(self.MODELS.keys())
        update_status = {}

        print("🔍 检查模型更新...")
        print("=" * 60)

        for model in models_to_check:
            if not self.check_model_status(model):
                print(f"{model:20} | ❌ 未下载      | 需要下载")
                update_status[model] = True
                continue

            print(f"{model:20} | 🔍 检查中...    |", end='')

            remote_info = self.get_remote_model_info(model)
            local_info = self.get_local_model_info(model)

            if not remote_info:
                # Network failure: do not force a re-download of a working model.
                print(f"\r{model:20} | ⚠️ 无法检查     | 网络问题")
                update_status[model] = False
                continue

            # Compare by file size; ETag comparison is intentionally skipped
            # (GitHub release ETags are not stable content hashes).
            remote_size = int(remote_info.get('content_length', '0'))
            local_size = int(local_info.get('size', '0')) if local_info else 0

            needs_update = False
            reason = ""

            if remote_size != local_size and remote_size > 0:
                needs_update = True
                reason = f"大小不匹配 (远程:{remote_size//1024//1024}MB vs 本地:{local_size//1024//1024}MB)"
            elif remote_info.get('etag') and local_info:
                # Sizes match: treat as up to date.
                reason = "大小一致，认为是最新版本"
            else:
                reason = "无法确定版本，建议重新下载"
                needs_update = True

            status_text = "🔄 需要更新" if needs_update else "✅ 最新版本"
            print(f"\r{model:20} | {status_text:12} | {reason}")

            update_status[model] = needs_update

        print("=" * 60)

        need_update_count = sum(update_status.values())
        if need_update_count > 0:
            print(f"📋 发现 {need_update_count} 个模型需要更新")
        else:
            print("🎉 所有模型都是最新版本！")

        return update_status

    def update_models(self, model_name: Optional[str] = None, force: bool = False) -> bool:
        """Re-download models that are outdated (or all of them when forced).

        Args:
            model_name: update only this model; None means all models.
            force: when no model_name is given, refresh every model without
                consulting the remote update check. (Previously this
                parameter was accepted but ignored.)

        Returns:
            True when every selected model updated successfully.
        """
        if model_name:
            models_to_update = [model_name]
        elif force:
            # Forced refresh: skip the (network-dependent) update check.
            models_to_update = list(self.MODELS.keys())
        else:
            update_status = self.check_model_updates()
            models_to_update = [name for name, needs_update in update_status.items() if needs_update]

        if not models_to_update:
            print("✅ 没有模型需要更新")
            return True

        print(f"\n🔄 开始更新 {len(models_to_update)} 个模型...")

        success_count = 0
        for model in models_to_update:
            print(f"\n📥 更新模型: {model}")
            # Always force the download here: an update must replace the file.
            if self.download_model(model, force=True):
                success_count += 1
                print(f"✅ {model} 更新成功")
            else:
                print(f"❌ {model} 更新失败")

        print(f"\n📊 更新结果: {success_count}/{len(models_to_update)} 个模型更新成功")
        return success_count == len(models_to_update)

    def download_model(self, model_name: str, force: bool = False) -> bool:
        """Download the given model into the model directory.

        Args:
            model_name: key into MODELS.
            force: re-download even if a valid local copy exists.

        Returns:
            True on success, False on any failure. Partially downloaded
            files are removed on failure so they cannot later be mistaken
            for a valid model by check_model_status().
        """
        if model_name not in self.MODELS:
            print(f"❌ 未知模型: {model_name}")
            print(f"可用模型: {', '.join(self.MODELS.keys())}")
            return False

        model_info = self.MODELS[model_name]
        model_file = self.model_dir / model_info['filename']

        # Skip the download when a valid copy already exists.
        if not force and self.check_model_status(model_name):
            print(f"✅ 模型 {model_name} 已存在: {model_file}")
            return True

        print(f"🔄 下载模型: {model_name}")
        print(f"📁 保存路径: {model_file}")
        print(f"📊 文件大小: {model_info['size_mb']} MB")
        print()

        try:
            # Stream the response so the whole file never sits in memory.
            response = requests.get(model_info['url'], stream=True, timeout=30)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))

            with open(model_file, 'wb') as f:
                downloaded = 0
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)

                        # Progress indicator (percentage when the size is known).
                        if total_size > 0:
                            progress = (downloaded / total_size) * 100
                            print(f"\r⏳ 下载进度: {progress:.1f}% ({downloaded // 1024 // 1024} MB)", end='')
                        else:
                            print(f"\r⏳ 已下载: {downloaded // 1024 // 1024} MB", end='')

            print(f"\n✅ 模型下载完成: {model_name}")
            return True

        except requests.RequestException as e:
            print(f"\n❌ 网络错误: {e}")
            print("💡 建议:")
            print("   1. 检查网络连接")
            print("   2. 检查防火墙设置")
            print("   3. 尝试使用代理")
            print("   4. 稍后重试")
            # BUGFIX: also clean up here — a mid-stream network error used to
            # leave a truncated file that later passed the >1KB status check.
            if model_file.exists():
                model_file.unlink()
            return False
        except Exception as e:
            print(f"\n❌ 下载失败: {e}")
            # Remove the incomplete file.
            if model_file.exists():
                model_file.unlink()
            return False

    def remove_model(self, model_name: str) -> bool:
        """Delete the local copy of a model. Returns True when it is gone."""
        if model_name not in self.MODELS:
            print(f"❌ 未知模型: {model_name}")
            return False

        model_file = self.model_dir / self.MODELS[model_name]['filename']

        # Already absent counts as success.
        if not model_file.exists():
            print(f"ℹ️ 模型文件不存在: {model_name}")
            return True

        try:
            model_file.unlink()
            print(f"✅ 已删除模型: {model_name}")
            return True
        except Exception as e:
            print(f"❌ 删除失败: {e}")
            return False

    def clean_cache(self):
        """Delete every .onnx file in the model directory."""
        try:
            if self.model_dir.exists():
                deleted_count = 0
                for onnx_file in self.model_dir.glob("*.onnx"):
                    onnx_file.unlink()
                    print(f"🗑️ 已删除: {onnx_file.name}")
                    deleted_count += 1

                if deleted_count > 0:
                    print(f"✅ 缓存清理完成，删除了 {deleted_count} 个文件")
                else:
                    print("ℹ️ 无缓存文件需要清理")
            else:
                print("ℹ️ 模型目录不存在，无需清理")
        except Exception as e:
            print(f"❌ 清理失败: {e}")


def main():
    """CLI entry point: parse sys.argv and dispatch to ModelManager.

    BUGFIX: flags (arguments starting with '--') are no longer consumed as
    model names, so e.g. `python model_manager.py update --force` forces an
    update instead of trying to update a model literally named '--force'.
    """
    if len(sys.argv) < 2:
        print("🤖 AI模型管理工具")
        print()
        print("用法:")
        print("  python model_manager.py list                   # 列出所有模型状态")
        print("  python model_manager.py check-updates         # 检查模型更新")
        print("  python model_manager.py update [model]        # 更新模型到最新版本")
        print("  python model_manager.py download <model>      # 下载指定模型")
        print("  python model_manager.py remove <model>        # 删除指定模型")
        print("  python model_manager.py clean                 # 清理所有模型缓存")
        print()
        print("可用模型: u2net, u2net_human_seg, silueta")
        print()
        print("示例:")
        print("  python model_manager.py check-updates         # 检查所有模型更新")
        print("  python model_manager.py update                # 更新所有需要更新的模型")
        print("  python model_manager.py update u2net          # 只更新u2net模型")
        return

    manager = ModelManager()
    command = sys.argv[1].lower()
    # Separate positional arguments (model names) from --flags.
    positional = [arg for arg in sys.argv[2:] if not arg.startswith('--')]
    force = '--force' in sys.argv

    if command == 'list':
        manager.list_models()
    elif command == 'check-updates':
        if positional:
            manager.check_model_updates(positional[0])
        else:
            manager.check_model_updates()
    elif command == 'update':
        if positional:
            manager.update_models(positional[0], force)
        else:
            manager.update_models(force=force)
    elif command == 'download':
        if not positional:
            print("❌ 请指定要下载的模型名称")
            return
        manager.download_model(positional[0], force)
    elif command == 'remove':
        if not positional:
            print("❌ 请指定要删除的模型名称")
            return
        manager.remove_model(positional[0])
    elif command == 'clean':
        manager.clean_cache()
    else:
        print(f"❌ 未知命令: {command}")

if __name__ == "__main__":
    main()
