"""
模型管理器 - 负责模型下载和缓存管理
"""
import os
import logging
from pathlib import Path
from typing import Dict, Any, Optional
import subprocess
import sys

logger = logging.getLogger(__name__)

class ModelManager:
    """Manage Docling model downloads and the local HuggingFace model cache.

    Responsibilities:
      * configure HuggingFace mirror/cache environment variables at construction,
      * report per-model availability, path and on-disk size,
      * trigger model downloads through the ``docling.cli.models`` CLI,
      * inspect and clear the cache directory.
    """

    def __init__(self, models_cache_dir: Optional[Path] = None):
        """Initialize the manager and prepare the cache directory.

        Args:
            models_cache_dir: Directory to use as the model cache. Defaults to
                ``models_cache`` next to this file's parent package.
        """
        self.models_cache_dir = models_cache_dir or Path(__file__).parent.parent / 'models_cache'
        # parents=True: do not fail when an intermediate directory is missing.
        self.models_cache_dir.mkdir(parents=True, exist_ok=True)

        # Point HuggingFace libraries at the mirror endpoint and local cache.
        self._setup_hf_mirror()

        # Static registry of models this application may use.
        # 'required' marks models without which processing cannot run.
        self.required_models: Dict[str, Dict[str, Any]] = {
            'granite_docling': {
                'repo_id': 'ibm-granite/granite-docling-258M',
                'description': 'Granite Docling 视觉语言模型',
                'required': True
            },
            'layout_model': {
                'repo_id': 'ds4sd/docling-models',
                'description': '布局检测模型',
                'required': True
            },
            'table_structure': {
                'repo_id': 'ds4sd/docling-models',
                'description': '表格结构识别模型',
                'required': False
            },
            'code_formula': {
                'repo_id': 'ds4sd/CodeFormulaV2',
                'description': '代码和公式识别模型',
                'required': False
            },
            'picture_classifier': {
                'repo_id': 'ds4sd/DocumentFigureClassifier',
                'description': '图片分类模型',
                'required': False
            }
        }

    def _setup_hf_mirror(self) -> None:
        """Configure HuggingFace mirror endpoint and cache location via env vars.

        Respects a pre-existing ``HF_ENDPOINT`` and falls back to hf-mirror.com.
        Best-effort: failures are logged, never raised.
        """
        try:
            # Keep a caller-provided endpoint; default to the mirror.
            hf_endpoint = os.environ.get('HF_ENDPOINT', 'https://hf-mirror.com')
            os.environ['HF_ENDPOINT'] = hf_endpoint
            os.environ['HF_HOME'] = str(self.models_cache_dir)

            try:
                import huggingface_hub
                if 'hf-mirror.com' in hf_endpoint:
                    # NOTE(review): mutating huggingface_hub.constants after import
                    # may have no effect on code that already read the constant —
                    # the HF_HOME env var above is the reliable mechanism. Confirm.
                    huggingface_hub.constants.HUGGINGFACE_HUB_CACHE = str(self.models_cache_dir)
                    logger.info(f"已配置HF镜像源: {hf_endpoint}")
            except ImportError:
                # huggingface_hub is optional; env vars alone still apply.
                logger.warning("huggingface_hub未安装，无法配置镜像")

            logger.info(f"模型缓存目录: {self.models_cache_dir}")

        except Exception as e:
            logger.error(f"配置HF镜像源失败: {str(e)}")

    def get_models_status(self) -> Dict[str, Any]:
        """Return a per-model status dict.

        Each entry contains ``available``, ``repo_id``, ``description`` and
        ``required``; when available it also carries ``path`` and ``size``,
        and on a check failure an ``error`` message instead.
        """
        status: Dict[str, Any] = {}

        for model_name, model_info in self.required_models.items():
            try:
                model_path = self._get_model_cache_path(model_info['repo_id'])
                # A model counts as available only if its directory is non-empty.
                is_available = model_path.exists() and any(model_path.iterdir())

                status[model_name] = {
                    'available': is_available,
                    'repo_id': model_info['repo_id'],
                    'description': model_info['description'],
                    'required': model_info['required'],
                    'path': str(model_path) if is_available else None,
                    'size': self._get_directory_size(model_path) if is_available else None
                }

            except Exception as e:
                logger.error(f"检查模型{model_name}状态时出错: {str(e)}")
                status[model_name] = {
                    'available': False,
                    'error': str(e),
                    'repo_id': model_info['repo_id'],
                    'description': model_info['description'],
                    'required': model_info['required']
                }

        return status

    def download_models(self, force: bool = False) -> Dict[str, Any]:
        """Download the required models via the docling CLI.

        Args:
            force: Re-download even if models are already cached.

        Returns:
            Dict with ``status`` ('success'/'error'), ``message`` and, on
            success, the CLI's stdout under ``output``.
        """
        results: Dict[str, Any] = {}

        try:
            logger.info("开始下载Docling模型...")

            # Run the docling downloader in a subprocess (list argv, no shell).
            cmd = [
                sys.executable, '-m', 'docling.cli.models', 'download',
                '--output-dir', str(self.models_cache_dir)
            ]
            if force:
                cmd.append('--force')

            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=1800  # 30-minute hard limit for the whole download
            )

            if result.returncode == 0:
                logger.info("模型下载成功")
                results['status'] = 'success'
                results['message'] = '模型下载完成'
                results['output'] = result.stdout
            else:
                logger.error(f"模型下载失败: {result.stderr}")
                results['status'] = 'error'
                results['message'] = f'模型下载失败: {result.stderr}'

        except subprocess.TimeoutExpired:
            logger.error("模型下载超时")
            results['status'] = 'error'
            results['message'] = '模型下载超时，请检查网络连接'

        except Exception as e:
            logger.error(f"下载模型时出错: {str(e)}")
            results['status'] = 'error'
            results['message'] = f'下载模型时出错: {str(e)}'

        return results

    def _get_model_cache_path(self, repo_id: str) -> Path:
        """Map a repo id like ``org/name`` to its expected cache directory.

        NOTE(review): this assumes a flat ``org--name`` layout directly under
        the cache dir. The standard HF hub cache uses
        ``hub/models--org--name`` — verify against what the docling CLI
        actually writes to ``--output-dir`` before trusting availability checks.
        """
        safe_repo_id = repo_id.replace('/', '--')
        return self.models_cache_dir / safe_repo_id

    def _get_directory_size(self, path: Path) -> str:
        """Return the total size of all files under ``path``, human-formatted.

        Returns "0 B" for a missing path and "未知" on errors.
        """
        try:
            if not path.exists():
                return "0 B"

            total_size = sum(
                file_path.stat().st_size
                for file_path in path.rglob('*')
                if file_path.is_file()
            )
            return self._format_size(total_size)

        except Exception as e:
            logger.error(f"计算目录大小失败: {str(e)}")
            return "未知"

    def _format_size(self, size_bytes: int) -> str:
        """Format a byte count as a human-readable string (e.g. "1.5 KB")."""
        if size_bytes == 0:
            return "0 B"

        size_names = ["B", "KB", "MB", "GB", "TB"]
        i = 0
        size = float(size_bytes)  # avoid mutating the int parameter in place
        while size >= 1024 and i < len(size_names) - 1:
            size /= 1024.0
            i += 1

        return f"{size:.1f} {size_names[i]}"

    def check_models_availability(self) -> bool:
        """Return True iff every model marked ``required`` is available."""
        status = self.get_models_status()

        for model_name, model_status in status.items():
            if self.required_models[model_name]['required'] and not model_status['available']:
                return False

        return True

    def get_cache_info(self) -> Dict[str, Any]:
        """Return a summary of the cache: dir, size, file count, endpoint,
        and whether all required models are present.

        On failure returns only ``cache_dir`` and an ``error`` message.
        """
        try:
            cache_size = self._get_directory_size(self.models_cache_dir)
            # Count regular files only; rglob('*') would also yield directories.
            file_count = sum(
                1 for p in self.models_cache_dir.rglob('*') if p.is_file()
            )

            return {
                'cache_dir': str(self.models_cache_dir),
                'total_size': cache_size,
                'file_count': file_count,
                'hf_endpoint': os.environ.get('HF_ENDPOINT', 'default'),
                'models_available': self.check_models_availability()
            }

        except Exception as e:
            logger.error(f"获取缓存信息失败: {str(e)}")
            return {
                'cache_dir': str(self.models_cache_dir),
                'error': str(e)
            }

    def clear_cache(self) -> Dict[str, Any]:
        """Delete the entire cache directory, then recreate it empty."""
        try:
            import shutil

            if self.models_cache_dir.exists():
                shutil.rmtree(self.models_cache_dir)
                # parents=True for symmetry with __init__; never fails on a
                # missing intermediate directory.
                self.models_cache_dir.mkdir(parents=True, exist_ok=True)

            logger.info("模型缓存已清理")
            return {'status': 'success', 'message': '模型缓存已清理'}

        except Exception as e:
            logger.error(f"清理缓存失败: {str(e)}")
            return {'status': 'error', 'message': f'清理缓存失败: {str(e)}'}
