"""
快速图片处理器
优化版本，提供5-10倍性能提升
"""

import time
import concurrent.futures
from pathlib import Path
from typing import Dict, Any, Optional, List
from .ollama_client_stdlib import OllamaClient


class FastImageProcessor:
    """Fast image processor.

    Wraps an ``OllamaClient`` with an mtime-keyed in-memory result cache,
    a thread pool for batch processing, and running timing statistics.
    """

    def __init__(self, ollama_config: Dict[str, Any], performance_config: Dict[str, Any]):
        """
        Initialize the fast image processor.

        Args:
            ollama_config: Ollama settings (host, port, model,
                system_prompt, max_retries).
            performance_config: Tuning knobs (max_workers, batch_size,
                delay, cache_enabled).
        """
        self.ollama_config = ollama_config
        self.performance_config = performance_config

        # Build the Ollama client from host/port with local defaults.
        host = f"http://{ollama_config.get('host', 'localhost')}:{ollama_config.get('port', 11434)}"
        model = ollama_config.get('model', 'qwen2.5-vl:7b')
        system_prompt = ollama_config.get('system_prompt')
        self.client = OllamaClient(host, model, system_prompt)

        # Performance settings.
        self.max_workers = performance_config.get('max_workers', 4)
        self.batch_size = performance_config.get('batch_size', 10)
        self.delay = performance_config.get('delay', 0.5)  # reduced delay for fast mode
        self.max_retries = ollama_config.get('max_retries', 2)  # fewer retries for fast mode

        # Description cache keyed by "<path>_<mtime>".
        # NOTE(review): unbounded — only emptied by clear_cache()/reset_statistics().
        self.cache_enabled = performance_config.get('cache_enabled', True)
        self.cache: Dict[str, str] = {}

        # Running statistics (cache hits are excluded from timing).
        self.stats: Dict[str, Any] = self._fresh_stats()

    @staticmethod
    def _fresh_stats() -> Dict[str, Any]:
        """Return a statistics dict with every counter zeroed."""
        return {
            'total_processed': 0,
            'total_failed': 0,
            'total_time': 0,
            'avg_time': 0,
            'cache_hits': 0
        }

    def _get_cache_key(self, image_path: Path) -> str:
        """Build a cache key from the file path and its modification time.

        Including the mtime makes the cache self-invalidating when the
        file changes on disk.
        """
        mtime = image_path.stat().st_mtime
        return f"{image_path}_{mtime}"

    def generate_description(self, image_path: str) -> Optional[str]:
        """
        Generate a description for a single image (fast mode).

        Args:
            image_path: Path to the image file.

        Returns:
            The description text, or ``None`` on failure (the error is
            printed, never raised to the caller).
        """
        start_time = time.time()

        try:
            path = Path(image_path)

            if not path.exists():
                raise FileNotFoundError(f"图片文件不存在: {path}")

            # Compute the key once: a second stat() later could race with
            # the file being modified/deleted mid-call.
            cache_key = self._get_cache_key(path) if self.cache_enabled else None

            if cache_key is not None and cache_key in self.cache:
                self.stats['cache_hits'] += 1
                return self.cache[cache_key]

            description = self.client.analyze_image(path, self.max_retries)

            if description:
                self.stats['total_processed'] += 1
                if cache_key is not None:
                    self.cache[cache_key] = description
            else:
                self.stats['total_failed'] += 1

            # Update timing stats; early cache-hit returns skip this block.
            processing_time = time.time() - start_time
            self.stats['total_time'] += processing_time
            total_operations = self.stats['total_processed'] + self.stats['total_failed']
            if total_operations > 0:
                self.stats['avg_time'] = self.stats['total_time'] / total_operations

            return description

        except Exception as e:
            self.stats['total_failed'] += 1
            print(f"快速处理图片失败: {image_path} - {e}")
            return None

    def generate_descriptions_batch(self, image_paths: List[str]) -> List[Optional[str]]:
        """
        Generate descriptions for multiple images in parallel.

        Args:
            image_paths: List of image file paths.

        Returns:
            Descriptions aligned index-for-index with ``image_paths``
            (``None`` for any image that failed).
        """
        if not image_paths:
            return []

        # BUG FIX: as_completed() yields futures in *completion* order, so
        # appending results as they arrive scrambled the output relative to
        # the input.  Collect by input index so results[i] always belongs
        # to image_paths[i].
        results: List[Optional[str]] = [None] * len(image_paths)

        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_index = {
                executor.submit(self.generate_description, path): i
                for i, path in enumerate(image_paths)
            }

            for future in concurrent.futures.as_completed(future_to_index):
                i = future_to_index[future]
                try:
                    results[i] = future.result()
                except Exception as e:
                    print(f"批量处理失败: {image_paths[i]} - {e}")
                    results[i] = None

        return results

    def get_statistics(self) -> Dict[str, Any]:
        """Return a copy of the statistics, adding ``cache_hit_rate``
        (hits / all requests) when caching is enabled."""
        stats = self.stats.copy()
        if self.cache_enabled:
            total_requests = stats['total_processed'] + stats['total_failed'] + stats['cache_hits']
            if total_requests > 0:
                stats['cache_hit_rate'] = stats['cache_hits'] / total_requests
            else:
                stats['cache_hit_rate'] = 0
        return stats

    def reset_statistics(self) -> None:
        """Zero all counters.  Also empties the cache, so hit-rate figures
        after a reset reflect only fresh work."""
        self.stats = self._fresh_stats()
        self.cache.clear()

    def clear_cache(self) -> None:
        """Empty the description cache (statistics are kept)."""
        self.cache.clear()
        print("快速处理缓存已清空")