"""
存储管理器

实现数据分片、压缩存储、分片索引管理等功能。
"""

import json
import gzip
import lz4.frame
import hashlib
import time
from pathlib import Path
from typing import Dict, List, Optional, Any, Union, Iterator
from dataclasses import dataclass
from datetime import datetime, timezone, timedelta

from ..utils.logger import get_module_logger
from ..utils.file_utils import FileManager, parse_size_string, format_size
from ..utils.exceptions import StorageError

logger = get_module_logger(__name__)


@dataclass
class ChunkInfo:
    """Metadata describing one chunk file on disk."""
    chunk_id: str  # chunk identifier (file name stem, e.g. "chunk_000001")
    file_path: str  # path of the chunk file as written
    document_count: int  # number of documents stored in this chunk
    uncompressed_size: int  # payload size in bytes before compression
    compressed_size: int  # on-disk size in bytes after compression
    compression_type: str  # "gzip", "lz4" or "none"
    created_at: datetime  # UTC timestamp when the chunk was written
    checksum: Optional[str] = None  # SHA-256 hex digest of the compressed bytes, when enabled


@dataclass
class StorageConfig:
    """Configuration for chunked, compressed storage."""
    chunk_size: str = "100MB"  # max estimated uncompressed size per chunk (human-readable)
    compression: str = "gzip"  # gzip, lz4, none
    compression_level: int = 6  # level passed through to the compressor
    enable_checksum: bool = True  # compute a SHA-256 checksum for each written chunk
    max_chunks_per_dir: int = 1000  # chunk files per sub-directory before rolling over


class DataChunker:
    """Data chunker.

    Splits large document collections into small chunk files: each chunk
    is serialized as JSON and optionally compressed with gzip or lz4.
    """

    def __init__(self, config: Optional["StorageConfig"] = None):
        """Initialize the data chunker.

        Args:
            config: Storage configuration; a default StorageConfig is used
                when omitted.
        """
        self.config = config or StorageConfig()
        self.file_manager = FileManager()

        # Translate the human-readable size (e.g. "100MB") into a byte
        # count used as the flush threshold by ChunkWriter.
        self.max_chunk_size = parse_size_string(self.config.chunk_size)

        logger.info(f"数据分片器初始化，分片大小: {self.config.chunk_size}")

    def create_chunk_writer(self, output_dir: Path, chunk_prefix: str = "chunk") -> "ChunkWriter":
        """Create a streaming chunk writer bound to this chunker.

        Args:
            output_dir: Directory chunk files are written under.
            chunk_prefix: Prefix for generated chunk file names.

        Returns:
            A ChunkWriter instance.
        """
        return ChunkWriter(self, output_dir, chunk_prefix)

    def write_chunk(
        self,
        documents: List[Dict[str, Any]],
        output_path: Path,
        chunk_metadata: Optional[Dict[str, Any]] = None
    ) -> "ChunkInfo":
        """Serialize, compress and write a single chunk file.

        Args:
            documents: Documents to store in this chunk.
            output_path: Destination file path.
            chunk_metadata: Optional metadata embedded in the chunk payload.

        Returns:
            ChunkInfo describing the written chunk.
        """
        # Assemble the chunk payload. default=str makes values json.dumps
        # cannot handle natively (e.g. datetimes inside documents)
        # serializable as strings.
        chunk_data = {
            "chunk_metadata": chunk_metadata or {},
            "document_count": len(documents),
            "documents": documents,
            "created_at": datetime.now(timezone.utc).isoformat()
        }

        json_data = json.dumps(chunk_data, ensure_ascii=False, default=str)
        uncompressed_data = json_data.encode('utf-8')
        uncompressed_size = len(uncompressed_data)

        compressed_data, compression_type = self._compress_data(uncompressed_data)
        compressed_size = len(compressed_data)

        # Ensure the target directory exists before writing.
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'wb') as f:
            f.write(compressed_data)

        # Checksum covers the compressed bytes exactly as stored on disk.
        checksum = None
        if self.config.enable_checksum:
            checksum = self._calculate_checksum(compressed_data)

        chunk_info = ChunkInfo(
            chunk_id=output_path.stem,
            file_path=str(output_path),
            document_count=len(documents),
            uncompressed_size=uncompressed_size,
            compressed_size=compressed_size,
            compression_type=compression_type,
            created_at=datetime.now(timezone.utc),
            checksum=checksum
        )

        logger.debug(
            f"分片已写入: {output_path}, "
            f"{len(documents)} 文档, "
            f"压缩率: {compressed_size/uncompressed_size:.2%}"
        )

        return chunk_info

    def read_chunk(self, chunk_path: Path) -> List[Dict[str, Any]]:
        """Read a chunk file back into its document list.

        Args:
            chunk_path: Chunk file path.

        Returns:
            The documents stored in the chunk (empty list if absent).

        Raises:
            StorageError: If the file cannot be read, decompressed or parsed.
        """
        try:
            with open(chunk_path, 'rb') as f:
                compressed_data = f.read()

            # The file suffix (".gz"/".lz4") selects the decompressor.
            decompressed_data = self._decompress_data(compressed_data, chunk_path.suffix)

            chunk_data = json.loads(decompressed_data.decode('utf-8'))

            return chunk_data.get('documents', [])

        except Exception as e:
            logger.error(f"读取分片失败: {chunk_path}, {e}")
            raise StorageError(f"读取分片失败: {chunk_path}", str(chunk_path), e)

    def _compress_data(self, data: bytes) -> tuple[bytes, str]:
        """Compress raw bytes with the configured algorithm.

        Args:
            data: Raw bytes.

        Returns:
            Tuple of (compressed bytes, compression type name).
        """
        if self.config.compression == "gzip":
            compressed = gzip.compress(data, compresslevel=self.config.compression_level)
            return compressed, "gzip"
        elif self.config.compression == "lz4":
            compressed = lz4.frame.compress(data, compression_level=self.config.compression_level)
            return compressed, "lz4"
        else:
            # Unknown or "none": store the bytes uncompressed.
            return data, "none"

    def _decompress_data(self, data: bytes, file_suffix: str) -> bytes:
        """Decompress chunk bytes.

        The file suffix takes precedence; the configured algorithm acts as
        a fallback so files written without a suffix still decode.

        Args:
            data: Compressed bytes.
            file_suffix: File extension (e.g. ".gz", ".lz4").

        Returns:
            Decompressed bytes.
        """
        if file_suffix == ".gz" or self.config.compression == "gzip":
            return gzip.decompress(data)
        elif file_suffix == ".lz4" or self.config.compression == "lz4":
            return lz4.frame.decompress(data)
        else:
            return data

    def _calculate_checksum(self, data: bytes) -> str:
        """Return the SHA-256 hex digest of `data`.

        Args:
            data: Bytes to hash.

        Returns:
            Hex digest string.
        """
        # Uses the module-level hashlib import; the redundant
        # function-local re-import was removed.
        return hashlib.sha256(data).hexdigest()


class ChunkWriter:
    """Streaming chunk writer.

    Buffers documents and flushes them to a new chunk file whenever the
    estimated serialized size would exceed the chunker's limit.
    """

    def __init__(self, chunker: "DataChunker", output_dir: Path, chunk_prefix: str):
        """Initialize the chunk writer.

        Args:
            chunker: Parent data chunker (supplies config and write_chunk).
            output_dir: Directory chunk files are written under.
            chunk_prefix: Prefix for generated chunk file names.
        """
        self.chunker = chunker
        self.output_dir = output_dir
        self.chunk_prefix = chunk_prefix

        self.current_chunk_docs = []   # documents buffered for the next chunk
        self.current_chunk_size = 0    # estimated serialized size of the buffer
        self.chunk_index = 0           # monotonically increasing chunk number
        self.chunks_info = []          # ChunkInfo for every chunk written so far

        # Make sure the output directory exists up front.
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def add_document(self, document: Dict[str, Any]) -> Optional["ChunkInfo"]:
        """Add a document, flushing the current chunk first if it is full.

        Args:
            document: Document data.

        Returns:
            ChunkInfo of the flushed chunk when a flush occurred, else None.
        """
        # Estimate the document's serialized size in bytes.
        doc_size = len(json.dumps(document, ensure_ascii=False).encode('utf-8'))

        flushed = None
        if (self.current_chunk_docs and
                self.current_chunk_size + doc_size > self.chunker.max_chunk_size):
            flushed = self._write_current_chunk()

        # BUGFIX: the document is now always buffered. Previously the
        # method returned immediately after flushing, silently dropping
        # every document whose arrival triggered a flush.
        self.current_chunk_docs.append(document)
        self.current_chunk_size += doc_size

        return flushed

    def finalize(self) -> Optional["ChunkInfo"]:
        """Flush any remaining buffered documents.

        Returns:
            ChunkInfo of the final chunk, or None if nothing was buffered.
        """
        if self.current_chunk_docs:
            return self._write_current_chunk()
        return None

    def _write_current_chunk(self) -> "ChunkInfo":
        """Write the buffered documents as a chunk file and reset the buffer.

        Returns:
            ChunkInfo of the written chunk.
        """
        # The file name carries the zero-padded chunk index plus the
        # compression-specific extension.
        chunk_filename = f"{self.chunk_prefix}_{self.chunk_index:06d}.json"
        if self.chunker.config.compression == "gzip":
            chunk_filename += ".gz"
        elif self.chunker.config.compression == "lz4":
            chunk_filename += ".lz4"

        # Spread chunks over sub-directories to keep per-directory file
        # counts bounded.
        subdir_index = self.chunk_index // self.chunker.config.max_chunks_per_dir
        subdir = self.output_dir / f"chunks_{subdir_index:03d}"
        chunk_path = subdir / chunk_filename

        chunk_info = self.chunker.write_chunk(
            self.current_chunk_docs,
            chunk_path,
            {"chunk_index": self.chunk_index}
        )

        self.chunks_info.append(chunk_info)

        # Reset buffer state for the next chunk.
        self.current_chunk_docs = []
        self.current_chunk_size = 0
        self.chunk_index += 1

        return chunk_info

    def get_chunks_info(self) -> List["ChunkInfo"]:
        """Return a copy of the ChunkInfo list for all written chunks.

        Returns:
            List of ChunkInfo, in write order.
        """
        return self.chunks_info.copy()


class StorageManager:
    """Storage manager.

    Coordinates backup data storage, chunk indexing and retrieval.
    """

    def __init__(self, config: Optional["StorageConfig"] = None):
        """Initialize the storage manager.

        Args:
            config: Storage configuration; defaults to StorageConfig().
        """
        self.config = config or StorageConfig()
        # BUGFIX: previously the raw `config` argument (possibly None) was
        # forwarded, so the chunker could build its own default
        # StorageConfig distinct from self.config. Forward the resolved
        # config so both always refer to the same object.
        self.chunker = DataChunker(self.config)
        self.file_manager = FileManager()

        logger.info("存储管理器初始化完成")

    def create_backup_storage(self, backup_path: Path) -> "BackupStorage":
        """Create a BackupStorage scoped to `backup_path`.

        Args:
            backup_path: Backup directory.

        Returns:
            A BackupStorage facade for this manager.
        """
        return BackupStorage(self, backup_path)

    def list_chunks(self, backup_path: Path) -> List["ChunkInfo"]:
        """List all chunk files under a backup directory.

        Args:
            backup_path: Backup root directory.

        Returns:
            ChunkInfo entries sorted by chunk id (zero-padded names sort
            in write order). Unparseable files are skipped with a warning.
        """
        chunks_info = []

        for chunk_file in backup_path.rglob("chunk_*.json*"):
            try:
                chunk_info = self._parse_chunk_file(chunk_file)
                chunks_info.append(chunk_info)
            except Exception as e:
                logger.warning(f"解析分片文件失败: {chunk_file}, {e}")

        chunks_info.sort(key=lambda x: x.chunk_id)
        return chunks_info

    def _parse_chunk_file(self, chunk_file: Path) -> "ChunkInfo":
        """Build a ChunkInfo from a chunk file's name and stat() data.

        document_count and uncompressed_size are left at 0 because they
        are only known after reading and decompressing the file.

        Args:
            chunk_file: Chunk file path.

        Returns:
            ChunkInfo with on-disk metadata only.
        """
        stat = chunk_file.stat()

        # The extension identifies the compression used at write time.
        if chunk_file.suffix == ".gz":
            compression_type = "gzip"
        elif chunk_file.suffix == ".lz4":
            compression_type = "lz4"
        else:
            compression_type = "none"

        return ChunkInfo(
            chunk_id=chunk_file.stem.replace('.json', ''),
            file_path=str(chunk_file),
            document_count=0,  # unknown without reading the file
            uncompressed_size=0,  # unknown without reading the file
            compressed_size=stat.st_size,
            compression_type=compression_type,
            created_at=datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc)
        )

    def get_storage_stats(self, backup_path: Path) -> Dict[str, Any]:
        """Aggregate storage statistics for a backup.

        Args:
            backup_path: Backup root directory.

        Returns:
            Totals, compression ratio and human-readable sizes.
        """
        chunks = self.list_chunks(backup_path)

        total_compressed_size = sum(chunk.compressed_size for chunk in chunks)
        total_uncompressed_size = sum(chunk.uncompressed_size for chunk in chunks)
        total_documents = sum(chunk.document_count for chunk in chunks)

        # Guard the ratio: uncompressed sizes from _parse_chunk_file are 0.
        compression_ratio = (total_compressed_size / total_uncompressed_size
                           if total_uncompressed_size > 0 else 0)

        return {
            "total_chunks": len(chunks),
            "total_documents": total_documents,
            "total_compressed_size": total_compressed_size,
            "total_uncompressed_size": total_uncompressed_size,
            "compression_ratio": compression_ratio,
            "compressed_size_formatted": format_size(total_compressed_size),
            "uncompressed_size_formatted": format_size(total_uncompressed_size),
            "average_chunk_size": total_compressed_size // len(chunks) if chunks else 0
        }


class BackupStorage:
    """Per-backup storage facade.

    Scopes the shared StorageManager's chunk operations to a single
    backup directory.
    """

    def __init__(self, storage_manager: "StorageManager", backup_path: Path):
        """Bind this facade to a manager and a backup directory.

        Args:
            storage_manager: Shared storage manager instance.
            backup_path: Directory holding this backup's chunks.
        """
        self.storage_manager = storage_manager
        self.backup_path = backup_path
        self.chunks_info = []

        # Create the backup directory up front so later writes never
        # fail on a missing parent.
        self.backup_path.mkdir(parents=True, exist_ok=True)

    def create_chunk_writer(self, chunk_prefix: str = "chunk") -> "ChunkWriter":
        """Return a chunk writer targeting this backup's directory.

        Args:
            chunk_prefix: Prefix used for generated chunk file names.

        Returns:
            A ChunkWriter bound to this backup path.
        """
        chunker = self.storage_manager.chunker
        return chunker.create_chunk_writer(self.backup_path, chunk_prefix)

    def read_all_documents(self) -> Iterator[Dict[str, Any]]:
        """Stream every document stored in this backup.

        Yields:
            Document dicts, chunk by chunk in listing order.
        """
        read = self.storage_manager.chunker.read_chunk
        for chunk in self.storage_manager.list_chunks(self.backup_path):
            try:
                yield from read(Path(chunk.file_path))
            except Exception as e:
                logger.error(f"读取分片失败: {chunk.file_path}, {e}")
                raise

    def get_stats(self) -> Dict[str, Any]:
        """Return aggregate storage statistics for this backup.

        Returns:
            Statistics dict from the storage manager.
        """
        return self.storage_manager.get_storage_stats(self.backup_path)


class LocalStorageManager:
    """Local storage manager.

    Manages local filesystem storage: directory layout, backup space
    allocation, usage accounting, retention-based cleanup and health
    reporting.
    """

    def __init__(self, root_path: Optional[Union[str, Path]] = None, config: Optional[Dict[str, Any]] = None):
        """Initialize the local storage manager.

        Args:
            root_path: Storage root path; defaults to ./es-archive-storage.
            config: Optional settings: "max_storage_size" (size string),
                "retention_days" (int), "cleanup_threshold" (float ratio).
        """
        self.root_path = Path(root_path) if root_path else Path.cwd() / "es-archive-storage"
        self.config = config or {}
        self.file_manager = FileManager()

        # Defaults: 10GB quota, 30-day retention, cleanup once 90% full.
        self.max_storage_size = parse_size_string(self.config.get("max_storage_size", "10GB"))
        self.retention_days = self.config.get("retention_days", 30)
        self.cleanup_threshold = self.config.get("cleanup_threshold", 0.9)  # 90%

        # Create the root and the fixed sub-directory layout.
        self.root_path.mkdir(parents=True, exist_ok=True)

        self.backups_dir = self.root_path / "backups"
        self.temp_dir = self.root_path / "temp"
        self.logs_dir = self.root_path / "logs"

        for directory in [self.backups_dir, self.temp_dir, self.logs_dir]:
            directory.mkdir(exist_ok=True)

        logger.info(f"本地存储管理器初始化完成，根路径: {self.root_path}")

    def allocate_backup_space(self, backup_id: str, estimated_size: int) -> Path:
        """Reserve a directory for a backup, cleaning up space if needed.

        Args:
            backup_id: Backup identifier (used as directory name).
            estimated_size: Estimated size in bytes.

        Returns:
            The allocated backup directory path.

        Raises:
            StorageError: If there is not enough space even after cleanup.
        """
        logger.info(f"为备份分配空间: {backup_id}, 预估大小: {format_size(estimated_size)}")

        available_space = self.get_available_space()
        if estimated_size > available_space:
            # Try to reclaim space once before giving up.
            self.cleanup_storage()
            available_space = self.get_available_space()

            if estimated_size > available_space:
                raise StorageError(
                    f"存储空间不足: 需要 {format_size(estimated_size)}, "
                    f"可用 {format_size(available_space)}"
                )

        backup_path = self.backups_dir / backup_id
        backup_path.mkdir(parents=True, exist_ok=True)

        logger.info(f"备份空间分配完成: {backup_path}")
        return backup_path

    def get_storage_usage(self) -> Dict[str, Any]:
        """Report storage usage per sub-directory plus disk-level figures.

        Returns:
            Usage statistics dict; empty dict on failure.
        """
        try:
            backups_size = self._calculate_directory_size(self.backups_dir)
            temp_size = self._calculate_directory_size(self.temp_dir)
            logs_size = self._calculate_directory_size(self.logs_dir)
            total_used = backups_size + temp_size + logs_size

            disk_usage = self.file_manager.get_disk_usage(self.root_path)

            return {
                "root_path": str(self.root_path),
                "total_used_bytes": total_used,
                "backups_size_bytes": backups_size,
                "temp_size_bytes": temp_size,
                "logs_size_bytes": logs_size,
                "disk_total_bytes": disk_usage["total"],
                "disk_used_bytes": disk_usage["used"],
                "disk_free_bytes": disk_usage["free"],
                # Usage relative to the configured quota, not the disk.
                "usage_percentage": total_used / self.max_storage_size if self.max_storage_size > 0 else 0,
                "formatted": {
                    "total_used": format_size(total_used),
                    "backups_size": format_size(backups_size),
                    "temp_size": format_size(temp_size),
                    "logs_size": format_size(logs_size),
                    "disk_total": format_size(disk_usage["total"]),
                    "disk_used": format_size(disk_usage["used"]),
                    "disk_free": format_size(disk_usage["free"])
                }
            }

        except Exception as e:
            logger.error(f"获取存储使用情况失败: {e}")
            return {}

    def get_available_space(self) -> int:
        """Return available space in bytes.

        The result is the smaller of the configured-quota headroom and the
        actual free disk space, clamped to be non-negative.

        Returns:
            Available space in bytes (0 on failure).
        """
        try:
            usage = self.get_storage_usage()

            config_available = self.max_storage_size - usage.get("total_used_bytes", 0)
            disk_available = usage.get("disk_free_bytes", 0)

            return max(0, min(config_available, disk_available))

        except Exception as e:
            logger.error(f"获取可用空间失败: {e}")
            return 0

    def list_backups(self) -> List[Dict[str, Any]]:
        """List all backups, newest first.

        Returns:
            Backup info dicts sorted by creation time (descending).
        """
        backups = []

        try:
            for backup_dir in self.backups_dir.iterdir():
                if backup_dir.is_dir():
                    backup_info = self._get_backup_info(backup_dir)
                    if backup_info:
                        backups.append(backup_info)

            # BUGFIX: "created_time" may be None when metadata lacks a
            # start_time; fall back to "" so sorting never raises
            # TypeError (which previously aborted the sort entirely).
            backups.sort(key=lambda x: x["created_time"] or "", reverse=True)

        except Exception as e:
            logger.error(f"列出备份失败: {e}")

        return backups

    def delete_backup(self, backup_id: str) -> bool:
        """Delete a backup directory tree.

        Args:
            backup_id: Backup identifier.

        Returns:
            True if the backup was deleted, False otherwise.
        """
        try:
            backup_path = self.backups_dir / backup_id

            if not backup_path.exists():
                logger.warning(f"备份不存在: {backup_id}")
                return False

            logger.info(f"删除备份: {backup_id}")

            import shutil
            shutil.rmtree(backup_path)

            logger.info(f"备份删除完成: {backup_id}")
            return True

        except Exception as e:
            logger.error(f"删除备份失败: {backup_id}, {e}")
            return False

    def cleanup_storage(self) -> Dict[str, Any]:
        """Reclaim space by removing expired backups and stale temp files.

        Cleanup only runs when usage exceeds the configured threshold;
        otherwise an empty summary is returned.

        Returns:
            Summary dict: cleaned backups/temp files, freed bytes, errors.
        """
        logger.info("开始清理存储空间")

        cleanup_result = {
            "cleaned_backups": [],
            "cleaned_temp_files": [],
            "freed_space_bytes": 0,
            "errors": []
        }

        try:
            usage = self.get_storage_usage()
            usage_ratio = usage.get("usage_percentage", 0)

            if usage_ratio > self.cleanup_threshold:
                logger.info(f"存储使用率 {usage_ratio:.1%} 超过阈值 {self.cleanup_threshold:.1%}，开始清理")

                # Remove backups older than the retention window.
                expired_backups = self._find_expired_backups()
                for backup_info in expired_backups:
                    if self.delete_backup(backup_info["backup_id"]):
                        cleanup_result["cleaned_backups"].append(backup_info)
                        cleanup_result["freed_space_bytes"] += backup_info["size_bytes"]

                # Remove temp files older than 24 hours; individual
                # failures are recorded rather than aborting the sweep.
                temp_files = self._find_temp_files()
                for temp_file in temp_files:
                    try:
                        file_size = temp_file.stat().st_size
                        temp_file.unlink()
                        cleanup_result["cleaned_temp_files"].append(str(temp_file))
                        cleanup_result["freed_space_bytes"] += file_size
                    except Exception as e:
                        cleanup_result["errors"].append(f"删除临时文件失败: {temp_file}, {e}")

            logger.info(f"存储清理完成，释放空间: {format_size(cleanup_result['freed_space_bytes'])}")

        except Exception as e:
            logger.error(f"存储清理失败: {e}")
            cleanup_result["errors"].append(str(e))

        return cleanup_result

    def create_temp_file(self, prefix: str = "temp", suffix: str = "") -> Path:
        """Create an empty temp file under the managed temp directory.

        Args:
            prefix: File name prefix.
            suffix: File name suffix.

        Returns:
            Path of the created temp file (caller owns cleanup).
        """
        import os
        import tempfile

        temp_fd, temp_path = tempfile.mkstemp(
            prefix=prefix,
            suffix=suffix,
            dir=self.temp_dir
        )

        # Close the descriptor; only the path is handed to the caller.
        os.close(temp_fd)

        return Path(temp_path)

    def _calculate_directory_size(self, directory: Path) -> int:
        """Return the total size in bytes of all files under `directory`.

        Args:
            directory: Directory to measure.

        Returns:
            Total size in bytes (best effort; 0 on traversal failure).
        """
        total_size = 0

        try:
            for file_path in directory.rglob("*"):
                if file_path.is_file():
                    total_size += file_path.stat().st_size
        except Exception as e:
            logger.warning(f"计算目录大小失败: {directory}, {e}")

        return total_size

    def _get_backup_info(self, backup_dir: Path) -> Optional[Dict[str, Any]]:
        """Read a backup's metadata.json and summarize it.

        Args:
            backup_dir: Backup directory.

        Returns:
            Backup info dict, or None when metadata is missing/unreadable.
        """
        try:
            metadata_file = backup_dir / "metadata.json"

            if metadata_file.exists():
                with open(metadata_file, 'r', encoding='utf-8') as f:
                    metadata = json.load(f)

                backup_size = self._calculate_directory_size(backup_dir)

                return {
                    "backup_id": backup_dir.name,
                    "backup_path": str(backup_dir),
                    "size_bytes": backup_size,
                    "size_formatted": format_size(backup_size),
                    "created_time": metadata.get("start_time"),
                    "status": metadata.get("status"),
                    "source_index": metadata.get("source_index"),
                    "document_count": metadata.get("backed_up_documents", 0)
                }

        except Exception as e:
            logger.warning(f"获取备份信息失败: {backup_dir}, {e}")

        return None

    def _find_expired_backups(self) -> List[Dict[str, Any]]:
        """Return backups whose creation time exceeds the retention window.

        Returns:
            Backup info dicts eligible for deletion.
        """
        expired_backups = []
        cutoff_time = datetime.now(timezone.utc) - timedelta(days=self.retention_days)

        for backup_info in self.list_backups():
            try:
                created_time_str = backup_info.get("created_time")
                if created_time_str:
                    # Accept both "Z" and "+00:00" UTC suffixes.
                    created_time = datetime.fromisoformat(created_time_str.replace('Z', '+00:00'))
                    if created_time < cutoff_time:
                        expired_backups.append(backup_info)
            except Exception as e:
                logger.warning(f"解析备份创建时间失败: {backup_info['backup_id']}, {e}")

        return expired_backups

    def _find_temp_files(self) -> List[Path]:
        """Return temp files older than 24 hours.

        Returns:
            Paths of stale temp files.
        """
        temp_files = []
        cutoff_time = time.time() - (24 * 3600)  # 24 hours ago

        try:
            for temp_file in self.temp_dir.rglob("*"):
                if temp_file.is_file():
                    if temp_file.stat().st_mtime < cutoff_time:
                        temp_files.append(temp_file)
        except Exception as e:
            logger.warning(f"查找临时文件失败: {e}")

        return temp_files

    def get_storage_health(self) -> Dict[str, Any]:
        """Evaluate storage health.

        Returns:
            Dict with "status" ("healthy"/"warning"/"critical"/"error"),
            detected "issues" and "recommendations".
        """
        health = {
            "status": "healthy",
            "issues": [],
            "recommendations": []
        }

        try:
            usage = self.get_storage_usage()
            usage_ratio = usage.get("usage_percentage", 0)

            # Quota usage checks.
            if usage_ratio > 0.9:
                health["status"] = "critical"
                health["issues"].append(f"存储使用率过高: {usage_ratio:.1%}")
                health["recommendations"].append("立即清理过期备份或增加存储空间")
            elif usage_ratio > 0.8:
                health["status"] = "warning"
                health["issues"].append(f"存储使用率较高: {usage_ratio:.1%}")
                health["recommendations"].append("考虑清理过期备份")

            # Disk usage check.
            # BUGFIX: guard against a zero/missing disk total, which
            # previously raised ZeroDivisionError and forced status "error".
            disk_total = usage.get("disk_total_bytes") or 1
            disk_usage_ratio = usage.get("disk_used_bytes", 0) / disk_total
            if disk_usage_ratio > 0.95:
                health["status"] = "critical"
                health["issues"].append(f"磁盘空间不足: {disk_usage_ratio:.1%}")
                health["recommendations"].append("清理磁盘空间或迁移到更大的磁盘")

            # Temp-file accumulation check.
            temp_files = self._find_temp_files()
            if len(temp_files) > 100:
                health["issues"].append(f"临时文件过多: {len(temp_files)} 个")
                health["recommendations"].append("清理临时文件")

        except Exception as e:
            health["status"] = "error"
            health["issues"].append(f"健康检查失败: {e}")

        return health


class CompressionManager:
    """Compression manager.

    Maintains a registry of supported compression algorithms (gzip, lz4,
    none) and provides compress/decompress helpers plus a benchmarking
    utility that recommends the best algorithm for a given data sample.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the compression manager.

        Args:
            config: Optional compression configuration dict. Currently only
                stored; kept for forward compatibility.
        """
        self.config = config or {}

        # Registry of supported algorithms: name -> callables + metadata.
        self.supported_algorithms = self._build_algorithms()

        logger.info("压缩管理器初始化完成")

    def _build_algorithms(self) -> Dict[str, Dict[str, Any]]:
        """Build the algorithm registry (extracted from __init__ for clarity)."""
        return {
            "gzip": {
                "compress": self._gzip_compress,
                "decompress": self._gzip_decompress,
                "extension": ".gz",
                "description": "标准gzip压缩，平衡压缩率和速度"
            },
            "lz4": {
                "compress": self._lz4_compress,
                "decompress": self._lz4_decompress,
                "extension": ".lz4",
                "description": "高速压缩，适合实时场景"
            },
            "none": {
                "compress": self._no_compress,
                "decompress": self._no_decompress,
                "extension": "",
                "description": "不压缩，保持原始格式"
            }
        }

    def compress_data(
        self,
        data: bytes,
        algorithm: str = "gzip",
        level: int = 6
    ) -> tuple[bytes, Dict[str, Any]]:
        """Compress data with the selected algorithm.

        Args:
            data: Raw bytes to compress.
            algorithm: Algorithm name (must be in ``supported_algorithms``).
            level: Compression level (ignored by the "none" algorithm).

        Returns:
            ``(compressed_bytes, info)`` where ``info`` records sizes,
            the compression ratio (compressed/original — smaller is better),
            space saved, elapsed time and throughput in bytes/second.

        Raises:
            ValueError: If ``algorithm`` is not supported.
        """
        if algorithm not in self.supported_algorithms:
            raise ValueError(f"不支持的压缩算法: {algorithm}")

        start_time = time.time()
        original_size = len(data)

        compress_func = self.supported_algorithms[algorithm]["compress"]
        compressed_data = compress_func(data, level)

        compressed_size = len(compressed_data)
        compression_time = time.time() - start_time

        compression_info = {
            "algorithm": algorithm,
            "level": level,
            "original_size": original_size,
            "compressed_size": compressed_size,
            # Ratio defined as compressed/original, so 0 on empty input.
            "compression_ratio": compressed_size / original_size if original_size > 0 else 0,
            "space_saved": original_size - compressed_size,
            "compression_time": compression_time,
            "compression_speed": original_size / compression_time if compression_time > 0 else 0
        }

        return compressed_data, compression_info

    def decompress_data(
        self,
        compressed_data: bytes,
        algorithm: str = "gzip"
    ) -> tuple[bytes, Dict[str, Any]]:
        """Decompress data with the selected algorithm.

        Args:
            compressed_data: Previously compressed bytes.
            algorithm: Algorithm name (must match the one used to compress).

        Returns:
            ``(decompressed_bytes, info)`` where ``info`` records sizes,
            elapsed time and throughput in bytes/second.

        Raises:
            ValueError: If ``algorithm`` is not supported.
        """
        if algorithm not in self.supported_algorithms:
            raise ValueError(f"不支持的压缩算法: {algorithm}")

        start_time = time.time()
        compressed_size = len(compressed_data)

        decompress_func = self.supported_algorithms[algorithm]["decompress"]
        decompressed_data = decompress_func(compressed_data)

        decompressed_size = len(decompressed_data)
        decompression_time = time.time() - start_time

        decompression_info = {
            "algorithm": algorithm,
            "compressed_size": compressed_size,
            "decompressed_size": decompressed_size,
            "decompression_time": decompression_time,
            "decompression_speed": decompressed_size / decompression_time if decompression_time > 0 else 0
        }

        return decompressed_data, decompression_info

    def benchmark_algorithms(
        self,
        test_data: bytes,
        algorithms: Optional[List[str]] = None,
        levels: Optional[List[int]] = None
    ) -> Dict[str, Any]:
        """Benchmark compression algorithms against a data sample.

        Every algorithm/level combination is compressed, decompressed and
        round-trip verified; per-combination stats land under
        ``results["algorithms"][name]["levels"][level]`` (or an ``error``
        entry when the combination fails). The "none" algorithm has no
        meaningful levels and is benchmarked once at level 0. Previously
        the "none" entry skipped the decompression pass and integrity
        check — it now gets the same treatment as every other algorithm.

        Args:
            test_data: Sample bytes to benchmark against.
            algorithms: Algorithm names to test (default: all supported).
            levels: Compression levels to test (default: [1, 6, 9]).

        Returns:
            Benchmark results including ``recommendations`` for the best
            compression, best speed and a balanced choice.
        """
        algorithms = algorithms or list(self.supported_algorithms.keys())
        levels = levels or [1, 6, 9]

        results: Dict[str, Any] = {
            "test_data_size": len(test_data),
            "algorithms": {}
        }

        for algorithm in algorithms:
            algo_levels = [0] if algorithm == "none" else levels
            results["algorithms"][algorithm] = {"levels": {}}

            for level in algo_levels:
                try:
                    compressed, comp_info = self.compress_data(test_data, algorithm, level)
                    decompressed, decomp_info = self.decompress_data(compressed, algorithm)

                    # Round-trip integrity: decompressed output must equal input.
                    combined_info = {
                        **comp_info,
                        **decomp_info,
                        "data_integrity": decompressed == test_data
                    }
                    results["algorithms"][algorithm]["levels"][level] = combined_info

                except Exception as e:
                    results["algorithms"][algorithm]["levels"][level] = {
                        "error": str(e)
                    }

        # Derive recommendations from the collected measurements.
        results["recommendations"] = {
            "best_compression": self._find_best_algorithm(results, "compression_ratio"),
            "best_speed": self._find_best_algorithm(results, "compression_speed"),
            "balanced": self._find_balanced_algorithm(results)
        }

        return results

    def get_algorithm_info(self, algorithm: str) -> Dict[str, Any]:
        """Return a shallow copy of an algorithm's registry entry.

        Args:
            algorithm: Algorithm name.

        Returns:
            The registry entry (``{}`` for an unknown algorithm).
        """
        if algorithm not in self.supported_algorithms:
            return {}

        return self.supported_algorithms[algorithm].copy()

    def list_supported_algorithms(self) -> List[Dict[str, Any]]:
        """List the supported compression algorithms.

        Returns:
            One dict per algorithm with ``name``, ``extension`` and
            ``description`` (callables from the registry are omitted).
        """
        return [
            {
                "name": name,
                "extension": info["extension"],
                "description": info["description"]
            }
            for name, info in self.supported_algorithms.items()
        ]

    def _gzip_compress(self, data: bytes, level: int) -> bytes:
        """Compress with stdlib gzip at the given level."""
        return gzip.compress(data, compresslevel=level)

    def _gzip_decompress(self, data: bytes) -> bytes:
        """Decompress stdlib-gzip data."""
        return gzip.decompress(data)

    def _lz4_compress(self, data: bytes, level: int) -> bytes:
        """Compress with lz4 frame format (requires the lz4 package)."""
        try:
            import lz4.frame
            return lz4.frame.compress(data, compression_level=level)
        except ImportError:
            raise ImportError("lz4库未安装，请运行: pip install lz4")

    def _lz4_decompress(self, data: bytes) -> bytes:
        """Decompress lz4 frame data (requires the lz4 package)."""
        try:
            import lz4.frame
            return lz4.frame.decompress(data)
        except ImportError:
            raise ImportError("lz4库未安装，请运行: pip install lz4")

    def _no_compress(self, data: bytes, level: int) -> bytes:
        """Pass-through "compression": returns the input unchanged (level ignored)."""
        return data

    def _no_decompress(self, data: bytes) -> bytes:
        """Pass-through "decompression": returns the input unchanged."""
        return data

    def _find_best_algorithm(self, results: Dict[str, Any], metric: str) -> Dict[str, Any]:
        """Find the best algorithm/level for one metric.

        Speed metrics are maximized; everything else (e.g. the
        compressed/original ratio) is minimized. A None sentinel is used so
        that a legitimate candidate with value 0 can still be selected
        (previously impossible for speed metrics initialized to 0).

        Args:
            results: Output of :meth:`benchmark_algorithms`.
            metric: Metric key to rank by.

        Returns:
            Dict with ``algorithm``, ``level``, ``value`` and ``metric``.
            When no candidate exists, ``algorithm``/``level`` are None and
            ``value`` keeps the legacy defaults (0 for speed, inf otherwise).
        """
        maximize = metric in ("compression_speed", "decompression_speed")
        best_value: Optional[float] = None
        best_algorithm = None
        best_level = None

        for algorithm, algo_data in results["algorithms"].items():
            for level, level_data in algo_data["levels"].items():
                if metric in level_data and isinstance(level_data[metric], (int, float)):
                    value = level_data[metric]
                    better = (
                        best_value is None
                        or (value > best_value if maximize else value < best_value)
                    )
                    if better:
                        best_value = value
                        best_algorithm = algorithm
                        best_level = level

        if best_value is None:
            # Legacy fallback values when nothing was measurable.
            best_value = 0 if maximize else float('inf')

        return {
            "algorithm": best_algorithm,
            "level": best_level,
            "value": best_value,
            "metric": metric
        }

    def _find_balanced_algorithm(self, results: Dict[str, Any]) -> Dict[str, Any]:
        """Find a balanced algorithm weighing compression ratio against speed.

        Args:
            results: Output of :meth:`benchmark_algorithms`.

        Returns:
            Dict with ``algorithm``, ``level``, ``score`` and
            ``metric="balanced"`` (``algorithm`` is None when no
            combination scores above 0).
        """
        best_score = 0
        best_algorithm = None
        best_level = None

        for algorithm, algo_data in results["algorithms"].items():
            for level, level_data in algo_data["levels"].items():
                if ("compression_ratio" in level_data and
                    "compression_speed" in level_data and
                    isinstance(level_data["compression_ratio"], (int, float)) and
                    isinstance(level_data["compression_speed"], (int, float))):

                    # Ratio is compressed/original, so 1 - ratio grows with
                    # better compression; speed is normalized to ~MB/s units.
                    compression_score = 1 - level_data["compression_ratio"]
                    speed_score = level_data["compression_speed"] / 1000000

                    # Weighted blend; weights favor compression slightly.
                    balanced_score = 0.6 * compression_score + 0.4 * speed_score

                    if balanced_score > best_score:
                        best_score = balanced_score
                        best_algorithm = algorithm
                        best_level = level

        return {
            "algorithm": best_algorithm,
            "level": best_level,
            "score": best_score,
            "metric": "balanced"
        }
