"""
元数据提取服务

提供高级的元数据提取和管理功能。
"""

import asyncio
import json
import os
from typing import Dict, List, Any, Optional, Union
from pathlib import Path
from datetime import datetime
import logging

from src.core.di import Injectable, Inject, Service
from src.modules.extractors import (
    CompositeMetadataExtractor, 
    VideoMetadata, 
    AudioMetadata,
    ExtractorFactory
)


@Service("metadata_service")
class MetadataService:
    """
    元数据提取服务
    
    提供高级的元数据提取、缓存和管理功能。
    """
    
    def __init__(self,
                 extractor: CompositeMetadataExtractor = Inject(CompositeMetadataExtractor),
                 config: Dict[str, Any] = Inject("config"),
                 logger: logging.Logger = Inject("logger")):
        """
        Initialize the metadata service from injected dependencies.

        Args:
            extractor: Composite extractor that performs the actual metadata extraction.
            logger: Service logger.
            config: Global configuration dict; the "metadata_service" sub-dict
                is read for service-level settings.
        """
        self.extractor = extractor
        self.config = config
        self.logger = logger
        
        # Service configuration (all keys optional, with defaults)
        self.service_config = config.get("metadata_service", {})
        self.cache_enabled = self.service_config.get("enable_cache", True)
        self.cache_dir = Path(self.service_config.get("cache_dir", "cache/metadata"))
        self.cache_ttl = self.service_config.get("cache_ttl_hours", 24)  # cache lifetime (hours)
        
        # Create the cache directory up front so later writes don't fail
        if self.cache_enabled:
            self.cache_dir.mkdir(parents=True, exist_ok=True)
    
    async def extract_metadata(self, file_path: str, **kwargs) -> Union[VideoMetadata, AudioMetadata]:
        """
        Extract metadata for a single file, consulting the cache first.

        Args:
            file_path: Path of the media file.
            **kwargs: Extra options forwarded to the extractor. Pass
                ``force_refresh=True`` to bypass the cache for this call.

        Returns:
            The extracted (or cached) metadata object.
        """
        self.logger.info(f"提取元数据: {file_path}")

        use_cache = self.cache_enabled and not kwargs.get("force_refresh", False)
        if use_cache:
            cached = await self._get_cached_metadata(file_path)
            if cached:
                self.logger.info(f"使用缓存的元数据: {file_path}")
                return cached

        # Cache miss (or cache disabled/bypassed): do the real extraction
        metadata = await self.extractor.extract_metadata(file_path, **kwargs)

        if self.cache_enabled:
            await self._cache_metadata(file_path, metadata)

        return metadata
    
    async def batch_extract_metadata(self, 
                                   file_paths: List[str], 
                                   max_concurrent: int = None,
                                   **kwargs) -> Dict[str, Union[VideoMetadata, AudioMetadata]]:
        """
        Extract metadata for many files with bounded concurrency.

        Args:
            file_paths: Paths of the files to process.
            max_concurrent: Concurrency cap; when None, falls back to the
                "max_concurrent_extractions" config value (default 4).
            **kwargs: Extra options forwarded to :meth:`extract_metadata`.

        Returns:
            Mapping of file path to its metadata, or None for files that failed.
        """
        self.logger.info(f"批量提取 {len(file_paths)} 个文件的元数据")

        limit = max_concurrent
        if limit is None:
            limit = self.service_config.get("max_concurrent_extractions", 4)

        # Semaphore caps how many extractions run at once
        semaphore = asyncio.Semaphore(limit)

        async def bounded_extract(path: str):
            async with semaphore:
                try:
                    return await self.extract_metadata(path, **kwargs)
                except Exception as e:
                    # A single failure must not abort the whole batch
                    self.logger.error(f"提取 {path} 元数据失败: {e}")
                    return None

        pending = {path: asyncio.create_task(bounded_extract(path))
                   for path in file_paths}

        results = {path: await task for path, task in pending.items()}

        success_count = sum(1 for r in results.values() if r is not None)
        self.logger.info(f"批量元数据提取完成: 成功 {success_count}/{len(file_paths)}")

        return results
    
    async def analyze_directory(self, 
                              directory: str, 
                              recursive: bool = True,
                              file_patterns: List[str] = None) -> Dict[str, Any]:
        """
        Extract and summarize metadata for all media files in a directory.

        Args:
            directory: Directory to scan.
            recursive: Whether to search subdirectories as well.
            file_patterns: Glob patterns to match; defaults to common
                video/audio extensions.

        Returns:
            Analysis dict with "directory", "file_count", "files" and "summary".

        Raises:
            FileNotFoundError: If the directory does not exist.
        """
        self.logger.info(f"分析目录: {directory}")

        patterns = file_patterns
        if patterns is None:
            patterns = ["*.mp4", "*.avi", "*.mov", "*.mkv", "*.mp3", "*.wav", "*.aac"]

        dir_path = Path(directory)
        if not dir_path.exists():
            raise FileNotFoundError(f"目录不存在: {directory}")

        # rglob for recursive search, glob otherwise
        search = dir_path.rglob if recursive else dir_path.glob
        file_paths = [str(match) for pattern in patterns for match in search(pattern)]
        self.logger.info(f"找到 {len(file_paths)} 个媒体文件")

        if not file_paths:
            return {
                "directory": directory,
                "file_count": 0,
                "files": {},
                "summary": {}
            }

        metadata_results = await self.batch_extract_metadata(file_paths)

        # Build the summary report, then attach the raw per-file results
        analysis = self._analyze_metadata_collection(metadata_results)
        analysis["directory"] = directory
        analysis["file_count"] = len(file_paths)
        analysis["files"] = metadata_results

        return analysis
    
    async def compare_files(self, file_paths: List[str]) -> Dict[str, Any]:
        """
        Compare the metadata of several files.

        Args:
            file_paths: Paths of the files to compare.

        Returns:
            Comparison report dict (similarities, differences, file type info).

        Raises:
            ValueError: If fewer than two files yielded valid metadata.
        """
        self.logger.info(f"比较 {len(file_paths)} 个文件的元数据")

        metadata_results = await self.batch_extract_metadata(file_paths)

        # Drop files whose extraction failed
        valid_metadata = {path: meta
                          for path, meta in metadata_results.items()
                          if meta is not None}

        if len(valid_metadata) < 2:
            raise ValueError("至少需要2个有效的文件进行比较")

        return self._generate_comparison_report(valid_metadata)
    
    async def export_metadata(self, 
                            file_paths: List[str], 
                            output_path: str,
                            format: str = "json") -> str:
        """
        Extract metadata for the given files and write it to a file.

        Args:
            file_paths: Paths of the files to export metadata for.
            output_path: Destination file path.
            format: Output format, one of "json", "csv", "xml" (case-insensitive).

        Returns:
            The output file path.

        Raises:
            ValueError: If the format is not supported.
        """
        self.logger.info(f"导出 {len(file_paths)} 个文件的元数据到 {output_path}")

        # Extraction happens before format validation (matches prior behavior)
        metadata_results = await self.batch_extract_metadata(file_paths)

        exporters = {
            "json": self._export_json,
            "csv": self._export_csv,
            "xml": self._export_xml,
        }
        exporter = exporters.get(format.lower())
        if exporter is None:
            raise ValueError(f"不支持的导出格式: {format}")
        await exporter(metadata_results, output_path)

        self.logger.info(f"元数据导出完成: {output_path}")
        return output_path
    
    async def _get_cached_metadata(self, file_path: str) -> Optional[Union[VideoMetadata, AudioMetadata]]:
        """
        Load cached metadata for file_path.

        Returns None when the cache entry is missing, stale (older than the
        TTL, or the source file changed after caching), or unreadable.
        Stale entries are deleted on the way out.
        """
        try:
            cache_file = self._get_cache_file_path(file_path)
            if not cache_file.exists():
                return None

            cache_mtime = cache_file.stat().st_mtime
            source_mtime = os.path.getmtime(file_path)
            age_hours = (datetime.now().timestamp() - cache_mtime) / 3600

            # Expired, or the media file was modified after the cache entry
            if age_hours > self.cache_ttl or source_mtime > cache_mtime:
                cache_file.unlink()
                return None

            cache_data = json.loads(cache_file.read_text(encoding='utf-8'))

            payload = cache_data["metadata"]
            if cache_data.get("metadata_type") == "video":
                return VideoMetadata.from_dict(payload)
            return AudioMetadata.from_dict(payload)

        except Exception as e:
            # Cache problems degrade to a cache miss, never an error
            self.logger.warning(f"读取缓存失败: {e}")
            return None
    
    async def _cache_metadata(self, file_path: str, metadata: Union[VideoMetadata, AudioMetadata]) -> None:
        """
        Persist metadata to the JSON cache; failures are logged, not raised.
        """
        try:
            payload = {
                "file_path": file_path,
                "metadata_type": "video" if isinstance(metadata, VideoMetadata) else "audio",
                "metadata": metadata.to_dict(),
                "cached_at": datetime.now().isoformat(),
            }

            cache_file = self._get_cache_file_path(file_path)
            cache_file.write_text(
                json.dumps(payload, indent=2, ensure_ascii=False, default=str),
                encoding='utf-8',
            )

        except Exception as e:
            # Caching is best-effort; extraction already succeeded
            self.logger.warning(f"缓存元数据失败: {e}")
    
    def _get_cache_file_path(self, file_path: str) -> Path:
        """获取缓存文件路径"""
        import hashlib
        
        # 使用文件路径的哈希作为缓存文件名
        file_hash = hashlib.md5(file_path.encode()).hexdigest()
        return self.cache_dir / f"{file_hash}.json"

    def _analyze_metadata_collection(self, metadata_results: Dict[str, Union[VideoMetadata, AudioMetadata]]) -> Dict[str, Any]:
        """
        Build a summary report for a batch of extraction results.

        Args:
            metadata_results: Mapping of file path to metadata (None = failed).

        Returns:
            {"summary": {...}} with file counts and, when present,
            "video_stats" / "audio_stats" sub-reports.
        """
        valid_metadata = {path: meta for path, meta in metadata_results.items() if meta is not None}

        if not valid_metadata:
            # Fix: report the real input count instead of hard-coded 0
            return {"summary": {"total_files": len(metadata_results), "valid_files": 0}}

        # Split results by media kind
        video_files = []
        audio_files = []
        for path, metadata in valid_metadata.items():
            if isinstance(metadata, VideoMetadata):
                video_files.append((path, metadata))
            else:
                audio_files.append((path, metadata))

        summary = {
            "total_files": len(metadata_results),
            "valid_files": len(valid_metadata),
            "video_files": len(video_files),
            "audio_files": len(audio_files)
        }

        if video_files:
            summary["video_stats"] = self._analyze_video_collection([meta for _, meta in video_files])

        if audio_files:
            summary["audio_stats"] = self._analyze_audio_collection([meta for _, meta in audio_files])

        return {"summary": summary}

    def _analyze_video_collection(self, video_metadata: List[VideoMetadata]) -> Dict[str, Any]:
        """
        Aggregate duration/size statistics and resolution, fps and codec
        distributions for a list of video metadata objects.
        """
        if not video_metadata:
            return {}

        def tally(labels):
            # Count occurrences of each label
            counts = {}
            for label in labels:
                counts[label] = counts.get(label, 0) + 1
            return counts

        durations = [m.duration for m in video_metadata if m.duration > 0]
        sizes = [m.file_size for m in video_metadata if m.file_size > 0]

        stats = {
            "count": len(video_metadata),
            "total_duration": sum(durations),
            "average_duration": sum(durations) / len(durations) if durations else 0,
            "total_size": sum(sizes),
            "average_size": sum(sizes) / len(sizes) if sizes else 0,
        }

        resolution_labels = [f"{m.width}x{m.height}"
                             for m in video_metadata if m.width > 0 and m.height > 0]
        if resolution_labels:
            stats["resolution_distribution"] = tally(resolution_labels)

        fps_labels = [f"{m.fps:.1f}" for m in video_metadata if m.fps > 0]
        if fps_labels:
            stats["fps_distribution"] = tally(fps_labels)

        codec_labels = [m.video_codec for m in video_metadata if m.video_codec]
        if codec_labels:
            stats["codec_distribution"] = tally(codec_labels)

        return stats

    def _analyze_audio_collection(self, audio_metadata: List[AudioMetadata]) -> Dict[str, Any]:
        """
        Aggregate duration/size/bitrate statistics and sample-rate and codec
        distributions for a list of audio metadata objects.
        """
        if not audio_metadata:
            return {}

        def tally(labels):
            # Count occurrences of each label
            counts = {}
            for label in labels:
                counts[label] = counts.get(label, 0) + 1
            return counts

        durations = [m.duration for m in audio_metadata if m.duration > 0]
        sizes = [m.file_size for m in audio_metadata if m.file_size > 0]
        bitrates = [m.bitrate for m in audio_metadata if m.bitrate > 0]

        stats = {
            "count": len(audio_metadata),
            "total_duration": sum(durations),
            "average_duration": sum(durations) / len(durations) if durations else 0,
            "total_size": sum(sizes),
            "average_size": sum(sizes) / len(sizes) if sizes else 0,
            "average_bitrate": sum(bitrates) / len(bitrates) if bitrates else 0,
        }

        sr_labels = [f"{m.sample_rate}Hz" for m in audio_metadata if m.sample_rate > 0]
        if sr_labels:
            stats["sample_rate_distribution"] = tally(sr_labels)

        codec_labels = [m.codec for m in audio_metadata if m.codec]
        if codec_labels:
            stats["codec_distribution"] = tally(codec_labels)

        return stats

    def _generate_comparison_report(self, metadata_dict: Dict[str, Union[VideoMetadata, AudioMetadata]]) -> Dict[str, Any]:
        """
        Build a comparison report across the given files.

        Homogeneous collections get property-level similarity/difference
        detail; mixed video+audio collections only get a type distribution.
        """
        files = list(metadata_dict.keys())
        metadata_list = list(metadata_dict.values())

        report = {
            "files": files,
            "comparison_time": datetime.now().isoformat(),
            "similarities": {},
            "differences": {},
        }

        type_names = [type(meta).__name__ for meta in metadata_list]
        distinct = set(type_names)

        if len(distinct) != 1:
            # Mixed video/audio: a property-wise comparison makes no sense
            report["file_type"] = "mixed"
            report["type_distribution"] = {t: type_names.count(t) for t in distinct}
            return report

        report["file_type"] = type_names[0]
        if isinstance(metadata_list[0], VideoMetadata):
            report.update(self._compare_video_metadata(metadata_dict))
        else:
            report.update(self._compare_audio_metadata(metadata_dict))

        return report

    def _compare_video_metadata(self, metadata_dict: Dict[str, VideoMetadata]) -> Dict[str, Any]:
        """
        Compare basic video properties across files.

        Args:
            metadata_dict: Mapping of file path to VideoMetadata.

        Returns:
            {"similarities": {prop: shared_value},
             "differences": {prop: {file_path: value}}}
        """
        metadata_list = list(metadata_dict.values())

        # Properties compared across all files
        properties = ["width", "height", "fps", "duration", "video_codec", "aspect_ratio"]
        similarities = {}
        differences = {}

        for prop in properties:
            values = [getattr(meta, prop) for meta in metadata_list]
            unique_values = list(set(values))

            if len(unique_values) == 1:
                similarities[prop] = unique_values[0]
            else:
                # Bug fix: the old code did `files[i]` where `files` was a single
                # key string, indexing characters of the path. Map each file
                # path to its own value instead.
                differences[prop] = dict(zip(metadata_dict.keys(), values))

        return {"similarities": similarities, "differences": differences}

    def _compare_audio_metadata(self, metadata_dict: Dict[str, AudioMetadata]) -> Dict[str, Any]:
        """
        Compare basic audio properties across files.

        Args:
            metadata_dict: Mapping of file path to AudioMetadata.

        Returns:
            {"similarities": {prop: shared_value},
             "differences": {prop: {file_path: value}}}
        """
        metadata_list = list(metadata_dict.values())

        # Properties compared across all files
        properties = ["codec", "bitrate", "sample_rate", "channels", "duration"]
        similarities = {}
        differences = {}

        for prop in properties:
            values = [getattr(meta, prop) for meta in metadata_list]
            unique_values = list(set(values))

            if len(unique_values) == 1:
                similarities[prop] = unique_values[0]
            else:
                # Bug fix: the old code did `files[i]` where `files` was a single
                # key string, indexing characters of the path. Map each file
                # path to its own value instead.
                differences[prop] = dict(zip(metadata_dict.keys(), values))

        return {"similarities": similarities, "differences": differences}

    async def _export_json(self, metadata_results: Dict[str, Union[VideoMetadata, AudioMetadata]], output_path: str) -> None:
        """
        Write metadata as a JSON object mapping file path to its metadata
        dict (or null for files whose extraction failed).
        """
        export_data = {
            file_path: (metadata.to_dict() if metadata else None)
            for file_path, metadata in metadata_results.items()
        }

        with open(output_path, 'w', encoding='utf-8') as f:
            # default=str keeps non-JSON-native values (e.g. datetimes) exportable
            json.dump(export_data, f, indent=2, ensure_ascii=False, default=str)

    async def _export_csv(self, metadata_results: Dict[str, Union[VideoMetadata, AudioMetadata]], output_path: str) -> None:
        """
        Write metadata as CSV, one row per successfully extracted file.

        The header is the sorted union of all metadata fields plus
        'file_path'; files with failed extraction are skipped.
        """
        import csv

        # Collect the union of fields across all valid metadata dicts
        all_fields = set()
        valid_metadata = [meta for meta in metadata_results.values() if meta]
        for metadata in valid_metadata:
            all_fields.update(metadata.to_dict().keys())

        # Bug fix: every row gets a 'file_path' column below, so it must be
        # part of fieldnames — otherwise DictWriter raises ValueError.
        all_fields.add('file_path')

        with open(output_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=sorted(all_fields))
            writer.writeheader()

            for file_path, metadata in metadata_results.items():
                if metadata:
                    row = metadata.to_dict()
                    row['file_path'] = file_path
                    writer.writerow(row)

    async def _export_xml(self, metadata_results: Dict[str, Union[VideoMetadata, AudioMetadata]], output_path: str) -> None:
        """
        Write metadata as XML: a <metadata_collection> root with one <file>
        element per input, each carrying a path attribute and, when
        extraction succeeded, a <metadata> element of per-field children
        (None-valued fields are omitted).
        """
        import xml.etree.ElementTree as ET

        root = ET.Element("metadata_collection")

        for file_path, metadata in metadata_results.items():
            file_node = ET.SubElement(root, "file", {"path": file_path})

            if metadata:
                meta_node = ET.SubElement(file_node, "metadata")
                for key, value in metadata.to_dict().items():
                    if value is None:
                        continue
                    ET.SubElement(meta_node, key).text = str(value)

        ET.ElementTree(root).write(output_path, encoding='utf-8', xml_declaration=True)
