"""
数据库配置和连接管理
Enhanced for camera data persistence with optimized connection pooling,
transaction management, and performance monitoring.
"""
import asyncio
import os
import sqlite3
import threading
import time
from contextlib import asynccontextmanager, contextmanager
from datetime import datetime, timedelta
from typing import Any, AsyncGenerator, Dict, Generator, List, Optional

import structlog
from sqlalchemy import MetaData, create_engine, event, text
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from sqlalchemy.pool import QueuePool, StaticPool

from core.config import settings

logger = structlog.get_logger(__name__)

# 数据库连接池监控
class ConnectionPoolMonitor:
    """Thread-safe counters for database connection-pool activity.

    Tracks how many raw DBAPI connections were created, how often the
    pool served a cached connection (hit) versus opening a new one
    (miss), and how many connection-level errors occurred.

    NOTE(review): "active_connections" is initialized but never
    incremented or decremented anywhere in this module — confirm
    whether it is still needed.
    """

    def __init__(self):
        # Lock must exist before the stats dict is first installed.
        self._lock = threading.Lock()
        self.stats = self._fresh_stats()

    @staticmethod
    def _fresh_stats() -> Dict[str, Any]:
        """Return a new zeroed stats dict (single source of truth, used by
        both __init__ and reset_stats to avoid drifting duplicates)."""
        return {
            "total_connections": 0,
            "active_connections": 0,
            "pool_hits": 0,
            "pool_misses": 0,
            "connection_errors": 0,
            "last_reset": datetime.now(),
        }

    def record_connection(self):
        """Count one newly created DBAPI connection."""
        with self._lock:
            self.stats["total_connections"] += 1

    def record_pool_hit(self):
        """Count one checkout served from the pool."""
        with self._lock:
            self.stats["pool_hits"] += 1

    def record_pool_miss(self):
        """Count one checkout that required opening a new connection."""
        with self._lock:
            self.stats["pool_misses"] += 1

    def record_error(self):
        """Count one connection-level error."""
        with self._lock:
            self.stats["connection_errors"] += 1

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot copy of the current counters."""
        with self._lock:
            return self.stats.copy()

    def reset_stats(self):
        """Zero all counters and stamp the reset time."""
        with self._lock:
            self.stats = self._fresh_stats()

# Module-level singleton; shared by the engine event hooks, health checks,
# and the session helpers below.
pool_monitor = ConnectionPoolMonitor()

# 数据库引擎配置
# Database engine configuration
def create_database_engine() -> Engine:
    """Create the SQLAlchemy engine, tuned for camera-data persistence.

    SQLite URLs get a StaticPool plus per-connection PRAGMA tuning via a
    "connect" event hook; any other backend gets a QueuePool. Pool
    activity is recorded on the module-level ``pool_monitor``.

    Returns:
        Engine: the configured SQLAlchemy engine.
    """
    if settings.DATABASE_URL.startswith("sqlite"):
        # SQLite configuration, optimized for camera data persistence
        engine = create_engine(
            settings.DATABASE_URL,
            connect_args={
                "check_same_thread": False,
                "timeout": 30,  # generous lock-wait timeout (seconds)
                "isolation_level": None,  # sqlite3 autocommit mode
            },
            poolclass=StaticPool,
            pool_pre_ping=True,  # validate connections before handing them out
            pool_recycle=3600,  # recycle connections after 1 hour
            echo=settings.LOG_LEVEL == "DEBUG",  # emit SQL in debug mode
            echo_pool=settings.LOG_LEVEL == "DEBUG",  # emit pool events in debug mode
        )
        
        # Per-connection SQLite performance tuning
        @event.listens_for(engine, "connect")
        def set_sqlite_pragma(dbapi_connection, connection_record):
            """Apply SQLite PRAGMA tuning on every new DBAPI connection."""
            cursor = dbapi_connection.cursor()
            try:
                # WAL journal allows concurrent readers alongside one writer
                cursor.execute("PRAGMA journal_mode=WAL")
                # NORMAL sync balances durability against write throughput
                cursor.execute("PRAGMA synchronous=NORMAL")
                # Larger page cache (~20k pages) for camera workloads
                cursor.execute("PRAGMA cache_size=20000")
                # Enforce foreign-key constraints (off by default in SQLite)
                cursor.execute("PRAGMA foreign_keys=ON")
                # Keep temporary tables/indices in memory
                cursor.execute("PRAGMA temp_store=MEMORY")
                # Memory-map up to 512MB of the database file
                cursor.execute("PRAGMA mmap_size=536870912")
                # 4KB pages (only takes effect on a fresh/vacuumed database)
                cursor.execute("PRAGMA page_size=4096")
                # Let SQLite run any pending query-planner optimizations
                cursor.execute("PRAGMA optimize")
                # Wait up to 30s on a locked database before erroring
                cursor.execute("PRAGMA busy_timeout=30000")
                # Record the new raw connection on the monitor
                pool_monitor.record_connection()
                logger.debug("SQLite性能优化参数设置完成")
            except Exception as e:
                logger.error("设置SQLite参数失败", exc_info=e)
                pool_monitor.record_error()
            finally:
                cursor.close()
        
        # Pool event listeners feeding the monitor
        @event.listens_for(engine, "checkout")
        def receive_checkout(dbapi_connection, connection_record, connection_proxy):
            """A connection was checked out of the pool (pool hit)."""
            pool_monitor.record_pool_hit()
        
        @event.listens_for(engine, "connect")
        def receive_connect(dbapi_connection, connection_record):
            """A brand-new connection had to be created (pool miss)."""
            pool_monitor.record_pool_miss()
            
    else:
        # Other backends (PostgreSQL, MySQL, ...), tuned for camera data
        engine = create_engine(
            settings.DATABASE_URL,
            poolclass=QueuePool,
            pool_size=20,  # base pool size
            max_overflow=30,  # extra connections allowed beyond the base
            pool_pre_ping=True,
            pool_recycle=3600,
            pool_timeout=30,  # seconds to wait for a free connection
            pool_reset_on_return='commit',  # reset state when returned
            echo=settings.LOG_LEVEL == "DEBUG",
        )
        
        # Same pool monitoring for non-SQLite backends
        @event.listens_for(engine, "checkout")
        def receive_checkout_other(dbapi_connection, connection_record, connection_proxy):
            pool_monitor.record_pool_hit()
        
        @event.listens_for(engine, "connect")
        def receive_connect_other(dbapi_connection, connection_record):
            pool_monitor.record_pool_miss()
    
    logger.info(f"数据库引擎创建成功: {settings.DATABASE_URL}")
    return engine

# Module-level engine, created once at import time.
engine = create_database_engine()

# Session factory tuned for camera-data operations.
SessionLocal = sessionmaker(
    autocommit=False, 
    autoflush=False, 
    bind=engine,
    expire_on_commit=False,  # keep ORM objects usable after commit
    class_=Session  # standard synchronous Session class
)

# 会话管理器
class DatabaseSessionManager:
    """Registry for named, long-lived database sessions.

    Sessions created with an ID are tracked so callers can look them up
    and close them later; anonymous sessions are the caller's
    responsibility. All registry access is lock-protected.
    """
    
    def __init__(self):
        self._sessions = {}
        self._lock = threading.Lock()
    
    def create_session(self, session_id: Optional[str] = None) -> Session:
        """Create a new session; register it under *session_id* if given.

        Fix: reusing an ID previously overwrote (and leaked) the old
        session — it is now closed before being replaced.
        """
        session = SessionLocal()
        if session_id:
            with self._lock:
                previous = self._sessions.get(session_id)
                self._sessions[session_id] = session
            # Close the displaced session outside the lock.
            if previous is not None:
                try:
                    previous.close()
                except Exception as e:
                    logger.error(f"关闭会话失败: {e}")
        return session
    
    def get_session(self, session_id: str) -> Optional[Session]:
        """Return the session registered under *session_id*, or None."""
        with self._lock:
            return self._sessions.get(session_id)
    
    def close_session(self, session_id: str):
        """Close and unregister the session for *session_id* (no-op if absent)."""
        with self._lock:
            session = self._sessions.pop(session_id, None)
            if session:
                session.close()
    
    def close_all_sessions(self):
        """Close every registered session, logging (not raising) failures."""
        with self._lock:
            for session in self._sessions.values():
                try:
                    session.close()
                except Exception as e:
                    logger.error(f"关闭会话失败: {e}")
            self._sessions.clear()

# Module-level singleton session registry.
session_manager = DatabaseSessionManager()

# Declarative base class all ORM models inherit from.
Base = declarative_base()

# Standalone MetaData instance.
# NOTE(review): models register on Base.metadata, not this object —
# confirm whether this separate MetaData is actually used anywhere.
metadata = MetaData()

# 增强的数据库健康检查
def check_database_health(detailed: bool = False) -> Dict[str, Any]:
    """检查数据库连接健康状态 - 返回详细信息"""
    health_info = {
        "healthy": False,
        "response_time": 0,
        "connection_pool_stats": {},
        "database_info": {},
        "timestamp": datetime.now().isoformat()
    }
    
    start_time = time.time()
    
    try:
        with engine.connect() as conn:
            # 基本连接测试
            conn.execute(text("SELECT 1"))
            
            if detailed:
                # 获取数据库信息
                if settings.DATABASE_URL.startswith("sqlite"):
                    result = conn.execute(text("PRAGMA database_list")).fetchall()
                    health_info["database_info"]["databases"] = [dict(row._mapping) for row in result]
                    
                    # 获取表信息
                    result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table'")).fetchall()
                    health_info["database_info"]["tables"] = [row[0] for row in result]
                    
                    # 获取数据库大小
                    result = conn.execute(text("PRAGMA page_count")).fetchone()
                    page_count = result[0] if result else 0
                    result = conn.execute(text("PRAGMA page_size")).fetchone()
                    page_size = result[0] if result else 0
                    health_info["database_info"]["size_bytes"] = page_count * page_size
        
        health_info["healthy"] = True
        health_info["response_time"] = time.time() - start_time
        health_info["connection_pool_stats"] = pool_monitor.get_stats()
        
        logger.info(f"数据库连接健康检查通过 (响应时间: {health_info['response_time']:.3f}s)")
        
    except Exception as e:
        health_info["response_time"] = time.time() - start_time
        health_info["error"] = str(e)
        logger.error(f"数据库连接健康检查失败 (响应时间: {health_info['response_time']:.3f}s)", exc_info=e)
        pool_monitor.record_error()
    
    return health_info

def get_database_stats() -> Dict[str, Any]:
    """Collect connection-pool, engine, and (for SQLite) storage statistics.

    Fixes:
    - Credentials are masked with ``URL.render_as_string(hide_password=True)``
      instead of ``str.replace`` on the password text, which could corrupt
      the URL when the password also appeared elsewhere in it.
    - ``Pool.size``/``checkedout``/``overflow`` are methods; the previous
      ``getattr`` put bound-method objects into the stats dict (which also
      broke JSON serialization of backup metadata). They are now called.

    Returns:
        Dict with pool counters, engine info, SQLite storage counters
        (when applicable), and an "error" key on failure.
    """
    def _pool_metric(name: str) -> Any:
        """Return the numeric pool metric, or 'N/A' when unsupported."""
        attr = getattr(engine.pool, name, None)
        if attr is None:
            return 'N/A'
        return attr() if callable(attr) else attr
    
    stats = {
        "connection_pool": pool_monitor.get_stats(),
        "engine_info": {
            "url": engine.url.render_as_string(hide_password=True),
            "pool_size": _pool_metric('size'),
            "checked_out": _pool_metric('checkedout'),
            "overflow": _pool_metric('overflow'),
        },
        "timestamp": datetime.now().isoformat()
    }
    
    try:
        with engine.connect() as conn:
            if settings.DATABASE_URL.startswith("sqlite"):
                # SQLite-specific storage counters
                result = conn.execute(text("PRAGMA cache_size")).fetchone()
                stats["sqlite_cache_size"] = result[0] if result else 0
                
                result = conn.execute(text("PRAGMA page_count")).fetchone()
                stats["sqlite_page_count"] = result[0] if result else 0
                
                result = conn.execute(text("PRAGMA freelist_count")).fetchone()
                stats["sqlite_freelist_count"] = result[0] if result else 0
                
    except Exception as e:
        logger.error("获取数据库统计信息失败", exc_info=e)
        stats["error"] = str(e)
    
    return stats

# 增强的事务管理上下文管理器
@contextmanager
def get_db_transaction(
    isolation_level: Optional[str] = None,
    timeout: Optional[int] = None
) -> Generator[Session, None, None]:
    """获取数据库事务会话 - 支持隔离级别和超时设置"""
    db = SessionLocal()
    start_time = time.time()
    
    try:
        # 设置事务隔离级别
        if isolation_level:
            db.execute(text(f"SET TRANSACTION ISOLATION LEVEL {isolation_level}"))
        
        yield db
        
        # 检查超时
        if timeout and (time.time() - start_time) > timeout:
            raise TimeoutError(f"事务超时: {timeout}秒")
        
        db.commit()
        logger.debug(f"数据库事务提交成功 (耗时: {time.time() - start_time:.3f}s)")
        
    except Exception as e:
        db.rollback()
        logger.error(f"数据库事务回滚 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()

# 只读会话上下文管理器
@contextmanager
def get_db_readonly(timeout: Optional[int] = None) -> Generator[Session, None, None]:
    """获取只读数据库会话 - 支持超时设置"""
    db = SessionLocal()
    start_time = time.time()
    
    try:
        yield db
        
        # 检查超时
        if timeout and (time.time() - start_time) > timeout:
            logger.warning(f"只读查询超时: {timeout}秒")
            
    except Exception as e:
        logger.error(f"只读数据库会话异常 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()

# 批量操作事务管理器
@contextmanager
def get_db_batch_transaction(batch_size: int = 1000) -> Generator[Session, None, None]:
    """获取批量操作事务会话 - 适用于大量摄像头数据操作"""
    db = SessionLocal()
    try:
        # 禁用自动刷新以提高批量操作性能
        db.autoflush = False
        yield db
        db.commit()
        logger.debug(f"批量事务提交成功 (批次大小: {batch_size})")
    except Exception as e:
        db.rollback()
        logger.error("批量事务回滚", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()

# 异步上下文管理器
@asynccontextmanager
async def get_async_db_transaction() -> Generator[Session, None, None]:
    """异步数据库事务会话"""
    db = SessionLocal()
    try:
        yield db
        db.commit()
        logger.debug("异步数据库事务提交成功")
    except Exception as e:
        db.rollback()
        logger.error("异步数据库事务回滚", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()


async def init_db():
    """Initialize the database: directory, connectivity, tables, indexes, seed data.

    Raises:
        Exception: if the health check fails or any setup step errors
        (logged and re-raised).
    """
    try:
        logger.info("开始初始化数据库...")
        
        # Make sure the directory holding the SQLite file exists.
        db_dir = os.path.dirname(settings.DATABASE_URL.replace("sqlite:///", ""))
        if db_dir and not os.path.exists(db_dir):
            os.makedirs(db_dir, exist_ok=True)
            logger.info(f"创建数据库目录: {db_dir}")
        
        # Verify connectivity. Bug fix: check_database_health() returns a
        # dict (always truthy), so the old `if not check_database_health()`
        # could never fail — inspect the "healthy" flag explicitly.
        if not check_database_health().get("healthy", False):
            raise Exception("数据库连接检查失败")
        
        # Import all models so they register themselves on Base.metadata.
        from models import Camera, AITask, AIResult, Alert, User, SystemConfig
        
        # Create all tables.
        Base.metadata.create_all(bind=engine)
        logger.info("数据库表创建成功")
        
        # Secondary indexes for the camera table.
        await create_database_indexes()
        
        # Seed data (delegated; see create_initial_data / init_db.py).
        await create_initial_data()
        
        logger.info("数据库初始化完成")
        
    except Exception as e:
        logger.error("数据库初始化失败", exc_info=e)
        raise

async def create_database_indexes():
    """Create the camera-table indexes (no-ops when they already exist).

    Raises on failure so init_db can abort initialization.
    """
    index_statements = (
        "CREATE INDEX IF NOT EXISTS idx_cameras_name ON cameras(name)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_status ON cameras(status)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_location ON cameras(longitude, latitude)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_wvp ON cameras(wvp_device_id, wvp_channel_id)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_ai_enabled ON cameras(ai_enabled)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_created_at ON cameras(created_at)",
        "CREATE INDEX IF NOT EXISTS idx_cameras_updated_at ON cameras(updated_at)",
    )
    
    try:
        with engine.connect() as conn:
            for statement in index_statements:
                conn.execute(text(statement))
            conn.commit()
            logger.info("数据库索引创建成功")
            
    except Exception as e:
        logger.error("数据库索引创建失败", exc_info=e)
        raise

async def create_initial_data():
    """Seed baseline rows when the database is empty (best-effort).

    The actual seed content is created in init_db.py; this function only
    checks emptiness and logs. Errors are logged, deliberately NOT
    re-raised, so a seeding failure does not abort startup.
    """
    try:
        with get_db_transaction() as db:
            from models import Camera, SystemConfig
            
            # Only log intent when the tables are empty; real seed data
            # lives in init_db.py and is usually skipped in production.
            if db.query(Camera).count() == 0:
                logger.info("创建示例摄像头数据...")
            
            if db.query(SystemConfig).count() == 0:
                logger.info("创建系统配置数据...")
        
        logger.info("初始数据创建完成")
    except Exception as e:
        logger.error("初始数据创建失败", exc_info=e)

def get_db() -> Generator[Session, None, None]:
    """Yield a request-scoped session for FastAPI dependency injection.

    Rolls back and re-raises on error, counting it on the pool monitor;
    the session is always closed and its lifetime logged at debug level.
    """
    start_time = time.time()
    db = SessionLocal()
    
    try:
        yield db
    except Exception as e:
        db.rollback()
        logger.error(f"数据库会话异常 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()
        logger.debug(f"数据库会话关闭 (耗时: {time.time() - start_time:.3f}s)")

def get_db_readonly_dependency() -> Generator[Session, None, None]:
    """Yield a session for read-only FastAPI endpoints.

    No rollback is attempted on error (nothing should have been written);
    failures are logged and re-raised, and the session always closes.
    """
    start_time = time.time()
    db = SessionLocal()
    
    try:
        # Flag read-only mode when the session type exposes such a flag.
        # (A plain SQLAlchemy Session does not, so this is usually a no-op.)
        if hasattr(db, 'read_only'):
            db.read_only = True
        yield db
    except Exception as e:
        logger.error(f"只读数据库会话异常 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()

async def get_async_db() -> AsyncGenerator[Session, None]:
    """Yield a session from an async context (e.g. async FastAPI routes).

    Fix: annotated as ``AsyncGenerator`` — an ``async def`` generator does
    not return a plain ``Generator``. Rolls back and re-raises on error;
    always closes the session.

    NOTE: the session itself is synchronous, so its queries block the
    event loop.
    """
    db = SessionLocal()
    start_time = time.time()
    
    try:
        yield db
    except Exception as e:
        db.rollback()
        logger.error(f"异步数据库会话异常 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()
        logger.debug(f"异步数据库会话关闭 (耗时: {time.time() - start_time:.3f}s)")

# 专用于摄像头数据的数据库会话
def get_camera_db() -> Generator[Session, None, None]:
    """获取专用于摄像头数据的数据库会话 - 优化配置"""
    db = SessionLocal()
    start_time = time.time()
    
    try:
        # 针对摄像头数据查询优化
        db.execute(text("PRAGMA query_only = 0"))  # 允许写操作
        db.execute(text("PRAGMA cache_size = 20000"))  # 增大缓存
        yield db
    except Exception as e:
        db.rollback()
        logger.error(f"摄像头数据库会话异常 (耗时: {time.time() - start_time:.3f}s)", exc_info=e)
        pool_monitor.record_error()
        raise
    finally:
        db.close()
        logger.debug(f"摄像头数据库会话关闭 (耗时: {time.time() - start_time:.3f}s)")

def close_db_connections():
    """Tear down all database resources: sessions, engine pool, counters.

    Failures are logged, not raised, so this is safe to call during
    shutdown paths.
    """
    try:
        session_manager.close_all_sessions()  # registered sessions first
        engine.dispose()                      # then the engine's pool
        pool_monitor.reset_stats()            # finally zero the counters
        logger.info("数据库连接和资源清理完成")
    except Exception as e:
        logger.error("关闭数据库连接失败", exc_info=e)

def optimize_database():
    """Run maintenance tasks (ANALYZE / PRAGMA optimize / VACUUM / REINDEX).

    SQLite gets the full maintenance sequence; other backends currently
    only log. Raises on failure so callers can react.
    """
    try:
        with engine.connect() as conn:
            if settings.DATABASE_URL.startswith("sqlite"):
                # SQLite maintenance sequence
                logger.info("开始SQLite数据库优化...")
                
                # Refresh query-planner statistics
                conn.execute(text("ANALYZE"))
                
                # Apply any pending planner optimizations
                conn.execute(text("PRAGMA optimize"))
                
                # Rebuild the file to reclaim free pages.
                # NOTE(review): SQLite's VACUUM cannot run inside an open
                # transaction; whether this succeeds here depends on the
                # engine's autocommit setup (connect_args isolation_level=None
                # in create_database_engine) — verify at runtime.
                conn.execute(text("VACUUM"))
                
                # Rebuild all indexes
                conn.execute(text("REINDEX"))
                
                conn.commit()
                logger.info("SQLite数据库优化完成")
            else:
                # Optimization hooks for other backends
                logger.info("执行数据库统计信息更新...")
                # Backend-specific maintenance could be added here
                
    except Exception as e:
        logger.error("数据库优化失败", exc_info=e)
        raise

def cleanup_old_connections():
    """Recycle the engine's connection pool, discarding stale connections.

    Fix: the previous implementation called ``engine.pool.recreate()`` and
    discarded the returned pool, leaving the engine still bound to the old
    one (i.e. it did nothing). ``Engine.dispose()`` is the documented way
    to close checked-in connections and install a fresh pool; connections
    currently checked out are simply discarded when returned.
    """
    try:
        engine.dispose()
        logger.info("过期连接清理完成")
    except Exception as e:
        logger.error("清理过期连接失败", exc_info=e)

# 增强的数据库备份和恢复功能
def backup_database(
    backup_path: str = None, 
    compress: bool = True,
    include_metadata: bool = True
) -> Dict[str, Any]:
    """备份数据库 - 支持压缩和元数据"""
    if not settings.DATABASE_URL.startswith("sqlite"):
        raise ValueError("数据库备份功能仅支持SQLite")
    
    try:
        import shutil
        import gzip
        import json
        from datetime import datetime
        
        # 获取数据库文件路径
        db_file = settings.DATABASE_URL.replace("sqlite:///", "")
        
        if backup_path is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_dir = os.path.join(os.path.dirname(db_file), "backups")
            os.makedirs(backup_dir, exist_ok=True)
            backup_path = os.path.join(backup_dir, f"campus_twin_backup_{timestamp}.db")
        
        backup_info = {
            "backup_path": backup_path,
            "original_size": 0,
            "backup_size": 0,
            "timestamp": datetime.now().isoformat(),
            "compressed": compress,
            "metadata_included": include_metadata
        }
        
        # 获取原始文件大小
        if os.path.exists(db_file):
            backup_info["original_size"] = os.path.getsize(db_file)
        
        # 执行备份
        if compress:
            # 压缩备份
            with open(db_file, 'rb') as f_in:
                with gzip.open(f"{backup_path}.gz", 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
            backup_path = f"{backup_path}.gz"
            backup_info["backup_path"] = backup_path
        else:
            # 普通备份
            shutil.copy2(db_file, backup_path)
        
        # 获取备份文件大小
        backup_info["backup_size"] = os.path.getsize(backup_path)
        
        # 包含元数据
        if include_metadata:
            metadata = {
                "backup_info": backup_info,
                "database_stats": get_database_stats(),
                "health_check": check_database_health(detailed=True)
            }
            
            metadata_path = f"{backup_path}.metadata.json"
            with open(metadata_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)
            
            backup_info["metadata_path"] = metadata_path
        
        logger.info(f"数据库备份成功: {backup_path} (压缩率: {backup_info['backup_size']/backup_info['original_size']*100:.1f}%)")
        return backup_info
        
    except Exception as e:
        logger.error("数据库备份失败", exc_info=e)
        raise

def restore_database(backup_path: str, verify: bool = True) -> Dict[str, Any]:
    """Restore the SQLite database from *backup_path* (optionally gzipped).

    A pre-restore safety copy of the current database is made, all
    connections are closed, the file is replaced, and the engine/session
    factory are rebuilt. With *verify*, a detailed health check must pass.

    Returns:
        Dict describing the restore (paths, timestamp, success flag).

    Raises:
        ValueError: if the configured database is not SQLite.
        FileNotFoundError: if *backup_path* does not exist.
        Exception: if the post-restore health check fails.
    """
    if not settings.DATABASE_URL.startswith("sqlite"):
        raise ValueError("数据库恢复功能仅支持SQLite")
    
    import shutil
    import gzip
    from datetime import datetime
    
    # Path of the SQLite database file
    db_file = settings.DATABASE_URL.replace("sqlite:///", "")
    
    # Fix: build restore_info BEFORE the try block so the except handler
    # can always record the error without risking a NameError.
    restore_info = {
        "backup_path": backup_path,
        "restore_path": db_file,
        "timestamp": datetime.now().isoformat(),
        "success": False
    }
    
    try:
        if not os.path.exists(backup_path):
            raise FileNotFoundError(f"备份文件不存在: {backup_path}")
        
        # Keep a safety copy of the current database before overwriting it.
        if os.path.exists(db_file):
            current_backup = f"{db_file}.pre_restore_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            shutil.copy2(db_file, current_backup)
            restore_info["pre_restore_backup"] = current_backup
        
        # Close every open session/connection before replacing the file.
        close_db_connections()
        
        # Replace the database file (decompressing if needed).
        if backup_path.endswith('.gz'):
            with gzip.open(backup_path, 'rb') as f_in:
                with open(db_file, 'wb') as f_out:
                    shutil.copyfileobj(f_in, f_out)
        else:
            shutil.copy2(backup_path, db_file)
        
        # Rebuild the engine and session factory against the restored file.
        global engine, SessionLocal
        engine = create_database_engine()
        SessionLocal = sessionmaker(
            autocommit=False, 
            autoflush=False, 
            bind=engine,
            expire_on_commit=False
        )
        
        # Verify the restored database is usable.
        if verify:
            health_check = check_database_health(detailed=True)
            restore_info["health_check"] = health_check
            if not health_check["healthy"]:
                raise Exception("数据库恢复后健康检查失败")
        
        restore_info["success"] = True
        logger.info(f"数据库恢复成功: {backup_path} -> {db_file}")
        return restore_info
        
    except Exception as e:
        logger.error("数据库恢复失败", exc_info=e)
        restore_info["error"] = str(e)
        raise

def list_backups(backup_dir: Optional[str] = None) -> List[Dict[str, Any]]:
    """List available backup files, newest first.

    Args:
        backup_dir: directory to scan; defaults to the ``backups/``
            directory next to the configured SQLite file.

    Returns:
        List of dicts (filename, filepath, size, timestamps, compressed
        flag, and parsed metadata when a sidecar exists). Empty when the
        directory is missing or an error occurs.

    Fix: ``json`` was never imported at module level, so reading the
    ``.metadata.json`` sidecar always failed silently with a NameError;
    it is now imported locally.
    """
    import json
    
    if backup_dir is None:
        db_file = settings.DATABASE_URL.replace("sqlite:///", "")
        backup_dir = os.path.join(os.path.dirname(db_file), "backups")
    
    backups: List[Dict[str, Any]] = []
    
    if not os.path.exists(backup_dir):
        return backups
    
    try:
        for filename in os.listdir(backup_dir):
            if filename.endswith('.db') or filename.endswith('.db.gz'):
                filepath = os.path.join(backup_dir, filename)
                backup_info = {
                    "filename": filename,
                    "filepath": filepath,
                    "size": os.path.getsize(filepath),
                    "created_time": datetime.fromtimestamp(os.path.getctime(filepath)).isoformat(),
                    "modified_time": datetime.fromtimestamp(os.path.getmtime(filepath)).isoformat(),
                    "compressed": filename.endswith('.gz')
                }
                
                # Attach the metadata sidecar when present (best-effort).
                metadata_path = f"{filepath}.metadata.json"
                if os.path.exists(metadata_path):
                    try:
                        with open(metadata_path, 'r', encoding='utf-8') as f:
                            metadata = json.load(f)
                        backup_info["metadata"] = metadata
                    except Exception as e:
                        logger.warning(f"读取备份元数据失败: {e}")
                
                backups.append(backup_info)
        
        # Newest first
        backups.sort(key=lambda x: x["created_time"], reverse=True)
        
    except Exception as e:
        logger.error("列出备份文件失败", exc_info=e)
    
    return backups