"""
MAESS数据备份与恢复管理模块
提供数据库、Redis、文件系统的备份和恢复功能
"""

import os
import asyncio
import time
import shutil
import subprocess
from datetime import datetime, timedelta
from typing import Dict, Any, List, Optional, Tuple
from pathlib import Path
import json

from loguru import logger
from app.core.config import settings
from app.core.database import db_manager
from app.core.redis_client import get_redis_client
from utils.constants import BackupType, BackupStatus, RestoreMode
from utils.exceptions import BackupError, RestoreError


class BackupConfig:
    """Backup configuration loaded from application settings.

    Values fall back to defaults when the corresponding setting is unset.
    Boolean flags deliberately use an ``is None`` check rather than ``or``:
    the original ``settings.X_ENABLED or True`` evaluated to ``True`` even
    when the setting was explicitly ``False``, so backups could never be
    disabled.  Non-boolean settings keep the original truthiness fallback
    (empty string / 0 / empty list also fall back to the default).
    """

    @staticmethod
    def _bool_setting(value: Optional[bool], default: bool) -> bool:
        """Return *default* only when *value* is None, preserving False."""
        return default if value is None else bool(value)

    def __init__(self):
        # Base configuration
        self.backup_root_dir: Path = Path(settings.BACKUP_ROOT_DIR or "/tmp/maess_backups")
        self.retention_days: int = settings.BACKUP_RETENTION_DAYS or 7
        # BUGFIX: was ``settings.BACKUP_COMPRESS or True`` — always True.
        self.compress: bool = self._bool_setting(settings.BACKUP_COMPRESS, True)
        self.max_backup_size_gb: int = settings.BACKUP_MAX_SIZE_GB or 50

        # Database backup configuration
        # BUGFIX: was ``... or True`` — the flag could never be False.
        self.db_backup_enabled: bool = self._bool_setting(settings.DB_BACKUP_ENABLED, True)
        self.db_backup_schedule: str = settings.DB_BACKUP_SCHEDULE or "0 3 * * *"  # daily at 03:00
        self.db_backup_timeout: int = settings.DB_BACKUP_TIMEOUT or 3600  # 1 hour

        # Redis backup configuration
        self.redis_backup_enabled: bool = self._bool_setting(settings.REDIS_BACKUP_ENABLED, True)
        self.redis_backup_schedule: str = settings.REDIS_BACKUP_SCHEDULE or "0 4 * * *"  # daily at 04:00

        # Filesystem backup configuration
        self.fs_backup_enabled: bool = self._bool_setting(settings.FS_BACKUP_ENABLED, True)
        self.fs_backup_schedule: str = settings.FS_BACKUP_SCHEDULE or "0 5 * * *"  # daily at 05:00
        self.fs_backup_dirs: List[str] = settings.FS_BACKUP_DIRS or [
            str(Path(settings.APP_DIR) / "data"),
            str(Path(settings.APP_DIR) / "uploads")
        ]

        # Validate and normalize the configuration.
        self._validate()

    def _validate(self):
        """Validate the configuration, fixing up invalid values in place."""
        # Make sure the backup root directory exists.
        self.backup_root_dir.mkdir(parents=True, exist_ok=True)

        # Retention must be at least one day.
        if self.retention_days < 1:
            logger.warning(f"保留天数 {self.retention_days} 无效，设置为默认值 7 天")
            self.retention_days = 7

        # Drop filesystem backup entries that are not existing directories.
        valid_dirs = []
        for dir_path in self.fs_backup_dirs:
            if os.path.exists(dir_path) and os.path.isdir(dir_path):
                valid_dirs.append(dir_path)
            else:
                logger.warning(f"备份目录不存在或不是有效目录: {dir_path}")
        self.fs_backup_dirs = valid_dirs

class BackupRecord:
    """Metadata for a single backup: id, type, timestamp, size, status, path."""

    def __init__(self, backup_id: str, backup_type: str, created_at: datetime,
                 size: int = 0, status: str = BackupStatus.COMPLETED):
        self.backup_id: str = backup_id
        self.backup_type: str = backup_type
        self.created_at: datetime = created_at
        # Size on disk in bytes (0 until the backup is measured).
        self.size: int = size
        self.status: str = status
        # Filesystem path of the backup artifact, set once it exists.
        self.path: Optional[str] = None
        # Free-form extra information about this backup.
        self.metadata: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this record into a JSON-friendly dict."""
        payload: Dict[str, Any] = {}
        payload["backup_id"] = self.backup_id
        payload["backup_type"] = self.backup_type
        payload["created_at"] = self.created_at.isoformat()
        payload["size"] = self.size
        payload["status"] = self.status
        payload["path"] = self.path
        payload["metadata"] = self.metadata
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "BackupRecord":
        """Rebuild a record from a dict produced by :meth:`to_dict`."""
        instance = cls(
            data["backup_id"],
            data["backup_type"],
            datetime.fromisoformat(data["created_at"]),
            size=data.get("size", 0),
            status=data.get("status", BackupStatus.COMPLETED),
        )
        instance.path = data.get("path")
        instance.metadata = data.get("metadata", {})
        return instance

class BackupManager:
    """Backup manager (singleton).

    Orchestrates database, Redis and filesystem backups, persists a JSON
    history of BackupRecord entries under the backup root directory,
    prunes backups past the retention window, and restores from existing
    backups.  Call :meth:`initialize` once before use.
    """

    _instance = None
    _lock = asyncio.Lock()

    def __new__(cls):
        # Classic singleton: every construction returns the shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # ``__new__`` always returns the shared instance, so __init__ may
        # run more than once; only set up attributes the first time.
        if not hasattr(self, "_constructed"):
            self._constructed: bool = True
            # BUGFIX: the original ended this branch with
            # ``self._initialized = True``, which made ``initialize()``
            # return immediately without ever loading the config or the
            # backup history (``_config`` stayed None and every backup
            # attempt crashed).  The flag must remain False here and is
            # flipped only by a successful ``initialize()``.
            self._initialized: bool = False
            self._config: Optional[BackupConfig] = None
            self._backup_history: List[BackupRecord] = []
            self._history_file: Optional[Path] = None
            # Maps backup_type -> running backup task, to serialize
            # concurrent backups of the same type.
            self._active_backups: Dict[str, asyncio.Task] = {}

    async def initialize(self):
        """Load configuration and backup history.  Idempotent and
        concurrency-safe (guarded by the class lock)."""
        async with self._lock:
            if self._initialized:
                return

            try:
                self._config = BackupConfig()
                self._history_file = self._config.backup_root_dir / "backup_history.json"

                # Load the persisted backup history.
                await self._load_backup_history()

                # Prune backups past the retention window.
                await self._cleanup_old_backups()

                logger.info("备份管理器初始化成功")
                self._initialized = True

            except Exception as e:
                logger.error(f"备份管理器初始化失败: {e}")
                raise

    async def _load_backup_history(self):
        """Load the backup history JSON file; start empty if missing/corrupt."""
        if not self._history_file.exists():
            logger.info("备份历史文件不存在，将创建新的")
            self._backup_history = []
            return

        try:
            with open(self._history_file, "r", encoding="utf-8") as f:
                data = json.load(f)
                self._backup_history = [BackupRecord.from_dict(record) for record in data]
            logger.info(f"已加载 {len(self._backup_history)} 条备份记录")
        except Exception as e:
            # A corrupt history file should not block backups; start fresh.
            logger.error(f"加载备份历史失败: {e}")
            self._backup_history = []

    async def _save_backup_history(self):
        """Persist the in-memory backup history to the JSON history file."""
        try:
            data = [record.to_dict() for record in self._backup_history]
            with open(self._history_file, "w", encoding="utf-8") as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"保存备份历史失败: {e}")

    async def _cleanup_old_backups(self):
        """Delete backups older than the configured retention window."""
        cutoff_date = datetime.now() - timedelta(days=self._config.retention_days)
        expired_records = [r for r in self._backup_history if r.created_at < cutoff_date]

        if not expired_records:
            return

        logger.info(f"清理 {len(expired_records)} 个过期备份")

        for record in expired_records:
            try:
                # Remove the backup artifact (directory or archive file).
                if record.path and os.path.exists(record.path):
                    if os.path.isdir(record.path):
                        shutil.rmtree(record.path)
                    else:
                        os.remove(record.path)

                # Drop it from the in-memory history.
                self._backup_history.remove(record)
                logger.info(f"已删除过期备份: {record.backup_id}")

            except Exception as e:
                # Best-effort cleanup: one failure must not stop the rest.
                logger.error(f"删除过期备份 {record.backup_id} 失败: {e}")

        # Persist the pruned history.
        await self._save_backup_history()

    async def create_backup(self, backup_type: str = BackupType.FULL) -> BackupRecord:
        """
        Create a backup.

        Args:
            backup_type: Backup type (full/database/redis/filesystem).

        Returns:
            BackupRecord: The record of the created backup.
        """
        # If a backup of the same type is already running, wait for it
        # before starting a new one (its result/exception is ignored).
        if backup_type in self._active_backups:
            logger.warning(f"备份 {backup_type} 正在进行中，等待完成...")
            await asyncio.wait([self._active_backups[backup_type]])

        # Run the backup as a task so it is visible in _active_backups.
        backup_task = asyncio.create_task(self._perform_backup(backup_type))
        self._active_backups[backup_type] = backup_task

        try:
            record = await backup_task
            return record
        finally:
            self._active_backups.pop(backup_type, None)

    async def _perform_backup(self, backup_type: str) -> BackupRecord:
        """Execute the backup, record the outcome, and clean up on failure."""
        backup_id = f"{backup_type}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
        record = BackupRecord(
            backup_id=backup_id,
            backup_type=backup_type,
            created_at=datetime.now(),
            status=BackupStatus.IN_PROGRESS
        )

        backup_dir = self._config.backup_root_dir / backup_id
        backup_dir.mkdir(parents=True, exist_ok=True)

        try:
            logger.info(f"开始创建备份: {backup_id}")

            # Dispatch on the requested backup type.
            if backup_type == BackupType.FULL:
                await self._backup_database(backup_dir)
                await self._backup_redis(backup_dir)
                await self._backup_filesystem(backup_dir)
            elif backup_type == BackupType.DATABASE:
                await self._backup_database(backup_dir)
            elif backup_type == BackupType.REDIS:
                await self._backup_redis(backup_dir)
            elif backup_type == BackupType.FILESYSTEM:
                await self._backup_filesystem(backup_dir)
            else:
                raise BackupError(f"不支持的备份类型: {backup_type}")

            # Compress the backup directory if configured.
            if self._config.compress:
                compressed_path = await self._compress_backup(backup_dir)
                record.path = str(compressed_path)
            else:
                record.path = str(backup_dir)

            # Measure the finished backup.
            record.size = self._get_path_size(record.path)
            record.status = BackupStatus.COMPLETED

            # Record and persist the successful backup.
            self._backup_history.append(record)
            await self._save_backup_history()

            logger.info(f"备份完成: {backup_id}, 大小: {self._format_size(record.size)}")

            # Opportunistically prune old backups.
            await self._cleanup_old_backups()

            return record

        except Exception as e:
            record.status = BackupStatus.FAILED
            logger.error(f"备份失败: {backup_id}, 错误: {e}")

            # Remove the partial backup directory.
            if os.path.exists(backup_dir):
                shutil.rmtree(backup_dir)

            # Failed backups are still recorded in the history.
            self._backup_history.append(record)
            await self._save_backup_history()

            raise BackupError(f"备份失败: {str(e)}") from e

    async def _backup_database(self, backup_dir: Path):
        """Back up the PostgreSQL database with pg_dump (custom format).

        Falls back to a SQLAlchemy structure-only dump when pg_dump is
        not installed.  Raises BackupError on failure or timeout.
        """
        if not self._config.db_backup_enabled:
            logger.info("数据库备份已禁用")
            return

        db_config = settings.DATABASE
        backup_file = backup_dir / "database_backup.sql"

        logger.info(f"开始备份数据库到: {backup_file}")

        try:
            # pg_dump custom format (-Fc) supports compression and pg_restore.
            cmd = [
                "pg_dump",
                f"--host={db_config.host}",
                f"--port={db_config.port}",
                f"--username={db_config.username}",
                f"--dbname={db_config.database}",
                f"--file={backup_file}",
                "--format=c",       # custom format: compressible, restorable
                "--compress=9",     # maximum compression level
                "--no-owner",
                "--no-privileges"
            ]

            # Supply the password via PGPASSWORD to avoid a prompt.
            env = os.environ.copy()
            env["PGPASSWORD"] = db_config.password

            process = await asyncio.create_subprocess_exec(
                *cmd,
                env=env,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            # Wait for completion, bounded by the configured timeout.
            try:
                stdout, stderr = await asyncio.wait_for(
                    process.communicate(),
                    timeout=self._config.db_backup_timeout
                )

                if process.returncode != 0:
                    raise BackupError(f"pg_dump 失败: {stderr.decode()}")

                logger.info("数据库备份成功")

            except asyncio.TimeoutError:
                process.kill()
                # BUGFIX: reap the killed child so it does not linger as
                # a zombie process.
                await process.wait()
                raise BackupError("数据库备份超时")

        except FileNotFoundError:
            # pg_dump not installed: fall back to a structure-only dump.
            logger.warning("未找到pg_dump，尝试使用SQLAlchemy进行表结构备份")
            await self._backup_database_with_sqlalchemy(backup_dir)
        except Exception as e:
            logger.error(f"数据库备份失败: {e}")
            raise

    async def _backup_database_with_sqlalchemy(self, backup_dir: Path):
        """Dump table names via SQLAlchemy inspection.

        NOTE: this records table names only — no DDL, no data — and is a
        last-resort fallback, not a substitute for pg_dump.
        """
        structure_file = backup_dir / "database_structure.sql"

        try:
            from sqlalchemy import inspect
            from app.core.database import Base

            inspector = inspect(db_manager.engine)
            tables = inspector.get_table_names()

            with open(structure_file, "w", encoding="utf-8") as f:
                f.write("-- MAESS数据库表结构备份 (仅表结构，无数据)\n")
                f.write(f"-- 生成时间: {datetime.now()}\n\n")

                for table_name in tables:
                    f.write(f"-- 表: {table_name}\n")
                    # SQLAlchemy inspection does not emit CREATE TABLE
                    # statements directly; only table names are recorded.
                    f.write(f"-- Table: {table_name}\n\n")

            logger.info(f"已备份 {len(tables)} 个表的结构信息")

        except Exception as e:
            logger.error(f"SQLAlchemy备份失败: {e}")
            raise

    async def _backup_redis(self, backup_dir: Path):
        """Back up Redis data to a JSON file.

        Currently a placeholder: writes a stub file until the Redis
        client implementation provides real key dumping.
        """
        if not self._config.redis_backup_enabled:
            logger.info("Redis备份已禁用")
            return

        redis_client = get_redis_client()
        if not redis_client:
            logger.warning("Redis客户端未初始化，跳过Redis备份")
            return

        backup_file = backup_dir / "redis_backup.json"

        logger.info("开始备份Redis数据")

        try:
            # TODO: real implementation once the Redis client is available:
            #   1. enumerate keys (SCAN), 2. read each value according to
            #   its type (string/hash/list/set/zset), 3. dump to JSON.
            # For now write a stub file so restore can find something.
            with open(backup_file, "w", encoding="utf-8") as f:
                f.write("{\n  \"_comment\": \"Redis备份将在Redis客户端实现后提供完整数据\"\n}\n")

            logger.info("Redis备份成功")

        except Exception as e:
            logger.error(f"Redis备份失败: {e}")
            raise

    async def _backup_filesystem(self, backup_dir: Path):
        """Copy each configured source directory under <backup_dir>/filesystem."""
        if not self._config.fs_backup_enabled:
            logger.info("文件系统备份已禁用")
            return

        fs_backup_dir = backup_dir / "filesystem"
        fs_backup_dir.mkdir(parents=True, exist_ok=True)

        logger.info("开始备份文件系统")

        try:
            for source_dir in self._config.fs_backup_dirs:
                source_path = Path(source_dir)
                if not source_path.exists():
                    logger.warning(f"源目录不存在: {source_dir}")
                    continue

                # Target is named after the source directory's basename.
                dir_name = source_path.name
                target_dir = fs_backup_dir / dir_name

                # Recursive copy; existing targets are merged.
                shutil.copytree(source_dir, target_dir, dirs_exist_ok=True)
                logger.info(f"已备份目录: {source_dir} -> {target_dir}")

            logger.info("文件系统备份成功")

        except Exception as e:
            logger.error(f"文件系统备份失败: {e}")
            raise

    async def _compress_backup(self, backup_dir: Path) -> Path:
        """Compress the backup directory into a sibling .tar.gz, then
        delete the original directory.  Returns the archive path."""
        compressed_file = backup_dir.parent / f"{backup_dir.name}.tar.gz"

        logger.info(f"开始压缩备份到: {compressed_file}")

        try:
            import tarfile

            with tarfile.open(compressed_file, "w:gz", compresslevel=9) as tar:
                tar.add(backup_dir, arcname=backup_dir.name)

            # The archive replaces the raw directory.
            shutil.rmtree(backup_dir)

            logger.info("备份压缩成功")
            return compressed_file

        except Exception as e:
            logger.error(f"备份压缩失败: {e}")
            raise

    async def restore_backup(self, backup_id: str, restore_mode: str = RestoreMode.FULL) -> Dict[str, Any]:
        """
        Restore a backup.

        Args:
            backup_id: Backup ID.
            restore_mode: Restore mode (full/database/redis/filesystem).

        Returns:
            Dict: Restore result per subsystem.

        Raises:
            RestoreError: when the backup is missing, invalid, or the
                restore fails.
        """
        # Look up the backup record by ID.
        backup_record = None
        for record in self._backup_history:
            if record.backup_id == backup_id:
                backup_record = record
                break

        if not backup_record:
            raise RestoreError(f"备份不存在: {backup_id}")

        if backup_record.status != BackupStatus.COMPLETED:
            raise RestoreError(f"备份状态无效: {backup_record.status}")

        if not backup_record.path or not os.path.exists(backup_record.path):
            raise RestoreError(f"备份文件不存在: {backup_record.path}")

        logger.info(f"开始恢复备份: {backup_id}, 模式: {restore_mode}")

        temp_dir = None
        try:
            # Extract first when the backup is a compressed archive.
            if backup_record.path.endswith('.tar.gz'):
                temp_dir = self._config.backup_root_dir / f"restore_temp_{int(time.time())}"
                await self._extract_backup(backup_record.path, temp_dir)
                restore_dir = temp_dir / backup_id
            else:
                restore_dir = Path(backup_record.path)

            # Restore each subsystem requested by the mode.
            results = {}

            if restore_mode == RestoreMode.FULL or restore_mode == RestoreMode.DATABASE:
                results["database"] = await self._restore_database(restore_dir)

            if restore_mode == RestoreMode.FULL or restore_mode == RestoreMode.REDIS:
                results["redis"] = await self._restore_redis(restore_dir)

            if restore_mode == RestoreMode.FULL or restore_mode == RestoreMode.FILESYSTEM:
                results["filesystem"] = await self._restore_filesystem(restore_dir)

            logger.info(f"备份恢复成功: {backup_id}")
            return {
                "backup_id": backup_id,
                "restore_mode": restore_mode,
                "status": "success",
                "results": results
            }

        except Exception as e:
            logger.error(f"备份恢复失败: {backup_id}, 错误: {e}")
            raise RestoreError(f"恢复失败: {str(e)}") from e
        finally:
            # Remove the temporary extraction directory, if any.
            if temp_dir and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)

    async def _extract_backup(self, backup_file: str, target_dir: Path):
        """Extract a .tar.gz backup archive into target_dir.

        SECURITY FIX: archive members are validated against the target
        root before extraction — a crafted archive with ``../`` or
        absolute-path members would otherwise write outside target_dir
        (path traversal via ``extractall``).
        """
        logger.info(f"解压备份文件: {backup_file} -> {target_dir}")

        try:
            import tarfile

            with tarfile.open(backup_file, "r:gz") as tar:
                target_root = Path(target_dir).resolve()
                for member in tar.getmembers():
                    member_path = (target_root / member.name).resolve()
                    if target_root != member_path and target_root not in member_path.parents:
                        raise RestoreError(f"备份文件包含非法路径: {member.name}")
                tar.extractall(target_dir)

            logger.info("备份解压成功")

        except Exception as e:
            logger.error(f"备份解压失败: {e}")
            raise

    async def _restore_database(self, backup_dir: Path):
        """Restore the PostgreSQL database with pg_restore.

        Returns a partial result when only a structure-only dump exists;
        raises RestoreError when no database backup is present or
        pg_restore fails.
        """
        backup_file = backup_dir / "database_backup.sql"
        if not backup_file.exists():
            # The SQLAlchemy fallback produces a structure-only file.
            structure_file = backup_dir / "database_structure.sql"
            if structure_file.exists():
                logger.warning("只有表结构备份，无法恢复完整数据")
                return {"status": "partial", "message": "只有表结构备份可用"}

            raise RestoreError("数据库备份文件不存在")

        logger.info("开始恢复数据库")

        try:
            db_config = settings.DATABASE

            # pg_restore with --clean drops existing objects first.
            cmd = [
                "pg_restore",
                f"--host={db_config.host}",
                f"--port={db_config.port}",
                f"--username={db_config.username}",
                f"--dbname={db_config.database}",
                "--verbose",
                "--clean",          # drop database objects before recreating
                "--if-exists",
                str(backup_file)
            ]

            # Supply the password via PGPASSWORD to avoid a prompt.
            env = os.environ.copy()
            env["PGPASSWORD"] = db_config.password

            process = await asyncio.create_subprocess_exec(
                *cmd,
                env=env,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, stderr = await process.communicate()

            if process.returncode != 0:
                raise RestoreError(f"pg_restore 失败: {stderr.decode()}")

            logger.info("数据库恢复成功")
            return {"status": "success"}

        except FileNotFoundError:
            raise RestoreError("未找到pg_restore命令，请确保PostgreSQL客户端已安装")
        except Exception as e:
            logger.error(f"数据库恢复失败: {e}")
            raise

    async def _restore_redis(self, backup_dir: Path):
        """Restore Redis data from the JSON backup file.

        Currently a placeholder: verifies the backup file and client
        exist but writes nothing until the Redis client implementation
        provides real key loading.
        """
        backup_file = backup_dir / "redis_backup.json"
        if not backup_file.exists():
            raise RestoreError("Redis备份文件不存在")

        redis_client = get_redis_client()
        if not redis_client:
            logger.warning("Redis客户端未初始化，跳过Redis恢复")
            return {"status": "skipped", "message": "Redis客户端未初始化"}

        logger.info("开始恢复Redis数据")

        try:
            # TODO: real implementation once the Redis client is available:
            #   load the JSON dump and write each key back according to
            #   its recorded type (string/hash/list/set/zset).

            logger.info("Redis恢复成功")
            return {"status": "success"}

        except Exception as e:
            logger.error(f"Redis恢复失败: {e}")
            raise

    async def _restore_filesystem(self, backup_dir: Path):
        """Copy backed-up directories back over their configured sources.

        Backup subdirectories are matched to sources by basename; those
        with no matching configured source are skipped with a warning.
        """
        fs_backup_dir = backup_dir / "filesystem"
        if not fs_backup_dir.exists():
            raise RestoreError("文件系统备份目录不存在")

        logger.info("开始恢复文件系统")

        try:
            restored_count = 0

            # Walk every directory captured in the backup.
            for backup_subdir in fs_backup_dir.iterdir():
                if not backup_subdir.is_dir():
                    continue

                # Match the backup subdirectory to a configured source
                # directory by basename.
                target_dir = None
                for source_dir in self._config.fs_backup_dirs:
                    if Path(source_dir).name == backup_subdir.name:
                        target_dir = source_dir
                        break

                if target_dir:
                    # Make sure the target exists before copying into it.
                    os.makedirs(target_dir, exist_ok=True)

                    # Recursive copy; existing files are overwritten.
                    shutil.copytree(backup_subdir, target_dir, dirs_exist_ok=True)
                    logger.info(f"已恢复目录: {backup_subdir.name} -> {target_dir}")
                    restored_count += 1
                else:
                    logger.warning(f"找不到对应的目标目录，跳过: {backup_subdir.name}")

            logger.info(f"文件系统恢复完成，共恢复 {restored_count} 个目录")
            return {"status": "success", "restored_directories": restored_count}

        except Exception as e:
            logger.error(f"文件系统恢复失败: {e}")
            raise

    def list_backups(self, backup_type: Optional[str] = None) -> List[BackupRecord]:
        """
        List backups.

        Args:
            backup_type: Optional backup-type filter.

        Returns:
            List[BackupRecord]: Matching backup records.
        """
        if backup_type:
            return [r for r in self._backup_history if r.backup_type == backup_type]
        return self._backup_history

    def get_backup_info(self, backup_id: str) -> Optional[BackupRecord]:
        """
        Look up a backup record by ID.

        Args:
            backup_id: Backup ID.

        Returns:
            BackupRecord: The record, or None when not found.
        """
        for record in self._backup_history:
            if record.backup_id == backup_id:
                return record
        return None

    def _get_path_size(self, path: str) -> int:
        """Return the total size of a file or directory tree, in bytes."""
        if os.path.isfile(path):
            return os.path.getsize(path)

        total_size = 0
        for dirpath, dirnames, filenames in os.walk(path):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                total_size += os.path.getsize(filepath)

        return total_size

    def _format_size(self, size_bytes: int) -> str:
        """Format a byte count as a human-readable string (B..PB)."""
        for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
            if size_bytes < 1024.0:
                return f"{size_bytes:.2f} {unit}"
            size_bytes /= 1024.0
        return f"{size_bytes:.2f} PB"

    async def close(self):
        """Shut down: wait for any in-flight backups to finish."""
        if self._active_backups:
            logger.info(f"等待 {len(self._active_backups)} 个正在进行的备份完成...")
            await asyncio.wait(self._active_backups.values())

        logger.info("备份管理器已关闭")


# Module-level shared backup manager (BackupManager is a singleton, so
# every construction yields the same object anyway).
backup_manager = BackupManager()


def get_backup_manager() -> BackupManager:
    """Return the shared :class:`BackupManager` instance."""
    return backup_manager


async def initialize_backup_manager():
    """Initialize the shared backup manager and return it."""
    mgr = get_backup_manager()
    await mgr.initialize()
    return mgr


async def create_backup(backup_type: str = BackupType.FULL) -> BackupRecord:
    """Convenience wrapper: ensure the manager is initialized, then back up.

    Args:
        backup_type: Backup type (full/database/redis/filesystem).

    Returns:
        BackupRecord: The record of the created backup.
    """
    manager = get_backup_manager()
    # initialize() is idempotent (it checks its own flag under a lock),
    # so call it unconditionally instead of peeking at private state.
    await manager.initialize()
    return await manager.create_backup(backup_type)


async def restore_backup(backup_id: str, restore_mode: str = RestoreMode.FULL) -> Dict[str, Any]:
    """Convenience wrapper: ensure the manager is initialized, then restore.

    Args:
        backup_id: Backup ID to restore.
        restore_mode: Restore mode (full/database/redis/filesystem).

    Returns:
        Dict: Restore result per subsystem.
    """
    manager = get_backup_manager()
    # initialize() is idempotent; no need to inspect private state.
    await manager.initialize()
    return await manager.restore_backup(backup_id, restore_mode)


async def list_available_backups(backup_type: Optional[str] = None) -> List[Dict[str, Any]]:
    """Convenience wrapper: list all available backups as dicts.

    Args:
        backup_type: Optional backup-type filter.

    Returns:
        List[Dict]: Serialized backup records.
    """
    manager = get_backup_manager()
    # initialize() is idempotent; no need to inspect private state.
    await manager.initialize()
    return [backup.to_dict() for backup in manager.list_backups(backup_type)]
