import os
import time
from typing import Dict, List, Optional

from .config_manager import ConfigManager
from .error_handler import ErrorHandler
from .video_preprocessor import VideoPreprocessor
from .video_processor import VideoProcessor
from .glm_analyzer import GLMAnalyzer
from .result_exporter import ResultExporter
from .parallel_processor import ParallelProcessor

class VideoBatchAnalyzer:
    """Orchestrates batch analysis of video files.

    Wires together configuration loading, video preprocessing, frame
    extraction, GLM-based content analysis, result export, and parallel
    execution over a directory of videos.
    """

    def __init__(self, config_path: str, api_key: Optional[str] = None, workers: int = 5):
        """Build the pipeline components.

        Args:
            config_path: Path to the configuration file.
            api_key: Optional API key override; when omitted, the key
                resolved by ConfigManager is used.
            workers: Maximum number of parallel workers.
        """
        self.cfg = ConfigManager(config_path, api_key)
        self.categories = self.cfg.load_category_config()
        self.api_key = self.cfg.get_api_key()
        self.error_handler = ErrorHandler()
        self.preprocessor = VideoPreprocessor()
        self.processor = VideoProcessor()
        base_url = self.cfg.get_base_url()
        self.analyzer = GLMAnalyzer(self.api_key, self.categories, base_url=base_url)
        self.exporter = ResultExporter()
        self.parallel = ParallelProcessor(max_workers=workers)

    def _retry(self, fn, *args, max_retries: int = 3, base_delay: float = 2.0, **kwargs):
        """Call ``fn`` with exponential backoff, re-raising the last error.

        Sleeps ``base_delay * 2**attempt`` seconds between attempts.

        Args:
            fn: Callable to invoke.
            max_retries: Total number of attempts (>= 1).
            base_delay: Initial backoff delay in seconds.

        Returns:
            Whatever ``fn`` returns on the first successful attempt.

        Raises:
            Exception: The last exception raised by ``fn`` once all
                attempts are exhausted.
        """
        for attempt in range(max_retries):
            try:
                return fn(*args, **kwargs)
            except Exception:
                if attempt == max_retries - 1:
                    raise  # bare raise preserves the original traceback
                time.sleep(base_delay * (2 ** attempt))
        return None  # unreachable for max_retries >= 1; kept as a safety net

    def process_single_video(self, video_path: str, output_dir: str) -> Optional[Dict]:
        """Preprocess, analyze, and export a single video.

        Args:
            video_path: Path to the input video file.
            output_dir: Directory where the result text file is written.

        Returns:
            The analysis result dict, or ``None`` on any failure (errors
            are logged via the error handler rather than propagated).
        """
        if not self.error_handler.handle_corrupted_video(video_path):
            self.error_handler.log_error(video_path, Exception('corrupted'), 'VideoError')
            return None
        processed_path = None
        try:
            print(f"[预处理] 开始: {os.path.basename(video_path)}")
            processed_path = self.preprocessor.compress_video_for_analysis(video_path)
            frames = self.processor.extract_video_frames(processed_path, frame_count=3)
            if not frames:
                raise RuntimeError('no frames extracted')
            # Measure the real duration and bucket it; used below to
            # override the model's (possibly wrong/missing) duration field.
            duration = self.processor.get_video_duration(video_path)
            duration_label = self.processor.categorize_duration(duration)
            print(f"[预处理] 时长: {int(duration)}s -> 分类: {duration_label}")
            result = self._retry(self.analyzer.analyze_video_content, frames)
            if not result:
                return None
            # Always trust the measured duration over the model output,
            # normalizing to the standard duration buckets.
            result['视频时长'] = duration_label
            outfile = os.path.join(output_dir, self.exporter.generate_filename(video_path))
            self.exporter.save_to_txt(result, outfile)
            print(f"[完成] 输出: {os.path.basename(outfile)}")
            return result
        except Exception as e:
            self.error_handler.log_error(video_path, e, 'ProcessError')
            print(f"[失败] {os.path.basename(video_path)} -> {e}")
            return None
        finally:
            # Best-effort removal of the compressed temp file on EVERY exit
            # path (the original only cleaned up on success, leaking the
            # file whenever analysis raised after compression).
            if (processed_path and processed_path != video_path
                    and os.path.exists(processed_path)):
                try:
                    os.remove(processed_path)
                except OSError:
                    pass  # cleanup is best-effort; never mask the real error

    def process_directory(self, directory_path: str, output_dir: str) -> Dict:
        """Analyze every video under ``directory_path`` in parallel.

        Creates ``output_dir`` if needed, processes all collected videos,
        and writes a ``failed_list.txt`` listing any videos that failed.

        Returns:
            The aggregate dict produced by the parallel processor
            (expected to contain at least a 'failed' list).
        """
        os.makedirs(output_dir, exist_ok=True)
        video_paths = self._collect_videos(directory_path)

        def worker(p: str) -> Optional[Dict]:
            return self.process_single_video(p, output_dir)

        agg = self.parallel.process_videos_parallel(video_paths, worker)
        # Persist the failure list so failed inputs can be re-run.
        if agg['failed']:
            fail_list = os.path.join(output_dir, 'failed_list.txt')
            with open(fail_list, 'w', encoding='utf-8') as f:
                f.write('\n'.join(agg['failed']))
        return agg

    def _collect_videos(self, dir_path: str, extensions: tuple = ('.mp4',)) -> List[str]:
        """Recursively collect video files under ``dir_path``.

        Args:
            dir_path: Root directory to walk.
            extensions: Lowercase file extensions to match
                (case-insensitive against filenames; defaults to MP4 only,
                preserving the original behavior).

        Returns:
            Full paths of all matching files.
        """
        paths = []
        for root, _, files in os.walk(dir_path):
            for name in files:
                # str.endswith accepts a tuple, so one check covers all
                # configured extensions.
                if name.lower().endswith(extensions):
                    paths.append(os.path.join(root, name))
        return paths



