#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
数据库连接池最佳实践 - Python实现详解

本文件展示Python中数据库连接池的最佳实践，包括SQLAlchemy连接池、
异步连接池、Redis连接池、MongoDB连接池等的配置和使用。

Python连接池特点：
1. SQLAlchemy内置连接池管理
2. 异步连接池支持
3. 连接池监控和调优
4. 多数据库连接池统一管理
5. 连接池健康检查

作者: Assistant
日期: 2024
"""

import asyncio
import logging
import time
import threading
from datetime import datetime, timedelta
from typing import List, Optional, Dict, Any, AsyncGenerator
from contextlib import asynccontextmanager, contextmanager
from dataclasses import dataclass, field
from concurrent.futures import ThreadPoolExecutor

# FastAPI相关导入
from fastapi import FastAPI, Depends, HTTPException, BackgroundTasks
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field

# SQLAlchemy连接池
from sqlalchemy import create_engine, text, event, pool
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy.pool import QueuePool, NullPool, StaticPool
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker, Session

# Redis连接池
import redis.asyncio as aioredis
# from aioredis import ConnectionPool as AsyncRedisPool  # 已更新为使用redis.asyncio
import redis
from redis import ConnectionPool as SyncRedisPool

# MongoDB连接池
import motor.motor_asyncio
import pymongo
from pymongo import MongoClient

# PostgreSQL异步驱动
import asyncpg

# 监控和指标
import psutil
from collections import defaultdict, deque

# ============================================================================
# 1. 连接池配置类
# ============================================================================

@dataclass
class DatabasePoolConfig:
    """Pool settings for a PostgreSQL database (sync and async engines)."""
    
    # --- connection endpoint and credentials ---
    host: str = "localhost"
    port: int = 5432
    database: str = "testdb"
    username: str = "user"
    password: str = "password"
    
    # --- pool sizing and lifetime ---
    pool_size: int = 10  # base number of pooled connections
    max_overflow: int = 20  # extra connections allowed beyond pool_size
    pool_timeout: int = 30  # seconds to wait for a free connection
    pool_recycle: int = 3600  # seconds before a connection is recycled
    pool_pre_ping: bool = True  # validate a connection before handing it out
    
    # --- diagnostics / return behavior ---
    echo: bool = False  # log emitted SQL
    echo_pool: bool = False  # log pool events
    pool_reset_on_return: str = "commit"  # reset action when a connection returns
    
    def _netloc(self) -> str:
        """Credentials + host + port + database path shared by both URLs."""
        return f"{self.username}:{self.password}@{self.host}:{self.port}/{self.database}"
    
    def get_sync_url(self) -> str:
        """URL for the synchronous (psycopg2-style) engine."""
        return "postgresql://" + self._netloc()
    
    def get_async_url(self) -> str:
        """URL for the asyncpg-backed async engine."""
        return "postgresql+asyncpg://" + self._netloc()

@dataclass
class RedisPoolConfig:
    """Pool settings for a Redis server."""
    
    # --- endpoint ---
    host: str = "localhost"
    port: int = 6379
    db: int = 0
    password: Optional[str] = None
    
    # --- pool and socket tuning ---
    max_connections: int = 50
    retry_on_timeout: bool = True
    socket_timeout: float = 5.0
    socket_connect_timeout: float = 5.0
    socket_keepalive: bool = True
    socket_keepalive_options: Dict[str, int] = field(default_factory=dict)
    
    # --- health checking ---
    health_check_interval: int = 30  # seconds between connection health checks
    
    def get_url(self) -> str:
        """Build a redis:// URL; a configured password is embedded as ':pwd@'."""
        if self.password:
            return f"redis://:{self.password}@{self.host}:{self.port}/{self.db}"
        return f"redis://{self.host}:{self.port}/{self.db}"

@dataclass
class MongoPoolConfig:
    """Pool settings for a MongoDB deployment."""
    
    # --- endpoint and credentials ---
    host: str = "localhost"
    port: int = 27017
    database: str = "testdb"
    username: Optional[str] = None
    password: Optional[str] = None
    
    # --- pool sizing ---
    max_pool_size: int = 100
    min_pool_size: int = 0
    max_idle_time_ms: int = 30000  # idle time before a pooled connection is dropped
    wait_queue_timeout_ms: int = 5000  # wait limit for a free connection
    
    # --- server selection ---
    server_selection_timeout_ms: int = 30000
    
    def get_url(self) -> str:
        """Build a mongodb:// URL; credentials appear only when both are set."""
        credentials = ""
        if self.username and self.password:
            credentials = f"{self.username}:{self.password}@"
        return f"mongodb://{credentials}{self.host}:{self.port}/{self.database}"

# ============================================================================
# 2. 连接池监控类
# ============================================================================

class PoolMonitor:
    """Thread-safe collector of request metrics and connection counts for one pool."""
    
    def __init__(self, name: str):
        self.name = name
        self.stats = self._fresh_stats()
        # Ring buffer: only the most recent 1000 response times feed the aggregates.
        self.response_times = deque(maxlen=1000)
        self.lock = threading.Lock()
    
    @staticmethod
    def _fresh_stats() -> Dict[str, Any]:
        """Return a zeroed statistics table."""
        return {
            "total_connections": 0,
            "active_connections": 0,
            "idle_connections": 0,
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "avg_response_time": 0.0,
            "max_response_time": 0.0,
            "min_response_time": float('inf')
        }
    
    def record_request(self, response_time: float, success: bool = True):
        """Record one request's outcome and latency, refreshing the aggregates."""
        with self.lock:
            self.stats["total_requests"] += 1
            outcome_key = "successful_requests" if success else "failed_requests"
            self.stats[outcome_key] += 1
            
            self.response_times.append(response_time)
            
            window = self.response_times
            if window:
                self.stats["avg_response_time"] = sum(window) / len(window)
                self.stats["max_response_time"] = max(window)
                self.stats["min_response_time"] = min(window)
    
    def update_pool_stats(self, total: int, active: int, idle: int):
        """Overwrite the connection-count gauges."""
        with self.lock:
            self.stats.update(
                total_connections=total,
                active_connections=active,
                idle_connections=idle,
            )
    
    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot: pool name, timestamp, counters, and success rate (%)."""
        with self.lock:
            completed = self.stats["total_requests"]
            rate = (
                self.stats["successful_requests"] / completed * 100
                if completed > 0 else 0
            )
            return {
                "pool_name": self.name,
                "timestamp": datetime.now().isoformat(),
                **dict(self.stats),
                "success_rate": rate
            }
    
    def reset_stats(self):
        """Zero all counters and drop the latency window."""
        with self.lock:
            self.stats = self._fresh_stats()
            self.response_times.clear()

# ============================================================================
# 3. SQLAlchemy连接池管理器
# ============================================================================

class SQLAlchemyPoolManager:
    """Manage a matched pair of sync/async SQLAlchemy engines for one database.

    Both engines are built from the same DatabasePoolConfig; request latency
    and connection counts are reported to a PoolMonitor.
    """
    
    def __init__(self, config: DatabasePoolConfig):
        self.config = config
        self.monitor = PoolMonitor("SQLAlchemy")
        
        # Synchronous engine backed by an explicit QueuePool.
        self.sync_engine = self._create_sync_engine()
        
        # Asynchronous engine (uses SQLAlchemy's async-compatible default pool).
        self.async_engine = self._create_async_engine()
        
        # Session factories bound to the respective engines.
        self.sync_session_factory = sessionmaker(bind=self.sync_engine)
        self.async_session_factory = async_sessionmaker(
            self.async_engine,
            class_=AsyncSession,
            expire_on_commit=False
        )
        
        # Attach connection-lifecycle logging hooks to the sync engine.
        self._register_events()
    
    def _create_sync_engine(self) -> Engine:
        """Create the synchronous engine with QueuePool sizing from the config."""
        return create_engine(
            self.config.get_sync_url(),
            poolclass=QueuePool,
            pool_size=self.config.pool_size,
            max_overflow=self.config.max_overflow,
            pool_timeout=self.config.pool_timeout,
            pool_recycle=self.config.pool_recycle,
            pool_pre_ping=self.config.pool_pre_ping,
            pool_reset_on_return=self.config.pool_reset_on_return,
            echo=self.config.echo,
            echo_pool=self.config.echo_pool,
            # Driver-level (psycopg2) connection options.
            connect_args={
                "connect_timeout": 10,
                "application_name": "FastAPI_App"
            }
        )
    
    def _create_async_engine(self):
        """Create the asynchronous engine.

        BUGFIX: the original passed ``poolclass=QueuePool``, which SQLAlchemy
        rejects for asyncio engines ("Pool class QueuePool cannot be used with
        asyncio engine").  Leaving ``poolclass`` unset selects the default
        AsyncAdaptedQueuePool, which honors the same sizing parameters.
        """
        return create_async_engine(
            self.config.get_async_url(),
            pool_size=self.config.pool_size,
            max_overflow=self.config.max_overflow,
            pool_timeout=self.config.pool_timeout,
            pool_recycle=self.config.pool_recycle,
            pool_pre_ping=self.config.pool_pre_ping,
            echo=self.config.echo,
            echo_pool=self.config.echo_pool,
            # asyncpg-level connection options.
            connect_args={
                "command_timeout": 10,
                "server_settings": {
                    "application_name": "FastAPI_Async_App"
                }
            }
        )
    
    def _register_events(self):
        """Attach logging hooks to the sync engine's pool events."""
        
        @event.listens_for(self.sync_engine, "connect")
        def log_connect(dbapi_connection, connection_record):
            """A new DBAPI connection was established."""
            logging.info(f"New database connection established: {id(dbapi_connection)}")
        
        @event.listens_for(self.sync_engine, "checkout")
        def log_checkout(dbapi_connection, connection_record, connection_proxy):
            """A pooled connection was handed to a caller."""
            logging.debug(f"Connection checked out: {id(dbapi_connection)}")
        
        @event.listens_for(self.sync_engine, "checkin")
        def log_checkin(dbapi_connection, connection_record):
            """A connection was returned to the pool."""
            logging.debug(f"Connection checked in: {id(dbapi_connection)}")
    
    @contextmanager
    def get_sync_session(self):
        """Yield a sync Session; commit on success, rollback on error.

        Latency and outcome are recorded in the monitor either way, and the
        session is always closed (returning its connection to the pool).
        """
        start_time = time.time()
        session = self.sync_session_factory()
        
        try:
            yield session
            session.commit()
            self.monitor.record_request(time.time() - start_time, True)
        except Exception:
            session.rollback()
            self.monitor.record_request(time.time() - start_time, False)
            raise
        finally:
            session.close()
    
    @asynccontextmanager
    async def get_async_session(self) -> AsyncGenerator[AsyncSession, None]:
        """Yield an AsyncSession; commit on success, rollback on error."""
        start_time = time.time()
        async with self.async_session_factory() as session:
            try:
                yield session
                await session.commit()
                self.monitor.record_request(time.time() - start_time, True)
            except Exception:
                await session.rollback()
                self.monitor.record_request(time.time() - start_time, False)
                raise
    
    def get_pool_status(self) -> Dict[str, Any]:
        """Snapshot the sync pool's counters and push them to the monitor.

        Note: QueuePool.overflow() starts at ``-pool_size`` before any
        connection exists, so ``size() + overflow()`` equals the number of
        connections actually created.
        """
        pool = self.sync_engine.pool
        
        status = {
            "pool_size": pool.size(),
            "checked_in": pool.checkedin(),
            "checked_out": pool.checkedout(),
            "overflow": pool.overflow(),
            # BUGFIX: QueuePool has no ``invalid()`` accessor — the original
            # call raised AttributeError.  Expose the readable status instead.
            "status": pool.status(),
            "total_connections": pool.size() + pool.overflow(),
            "available_connections": pool.checkedin(),
            "active_connections": pool.checkedout()
        }
        
        # Mirror the snapshot into the monitor gauges.
        self.monitor.update_pool_stats(
            status["total_connections"],
            status["active_connections"],
            status["available_connections"]
        )
        
        return status
    
    async def health_check(self) -> bool:
        """Return True when ``SELECT 1`` succeeds over the async engine."""
        try:
            async with self.get_async_session() as session:
                result = await session.execute(text("SELECT 1"))
                return result.scalar() == 1
        except Exception as e:
            logging.error(f"Database health check failed: {e}")
            return False
    
    async def close(self):
        """Dispose both engines, closing all pooled connections."""
        await self.async_engine.dispose()
        self.sync_engine.dispose()

# ============================================================================
# 4. Redis连接池管理器
# ============================================================================

class RedisPoolManager:
    """Manage shared async and sync Redis connection pools plus lazy clients."""
    
    def __init__(self, config: RedisPoolConfig):
        self.config = config
        self.monitor = PoolMonitor("Redis")
        
        # Both pools share the same URL and tuning options.
        url = config.get_url()
        pool_options = dict(
            max_connections=config.max_connections,
            retry_on_timeout=config.retry_on_timeout,
            socket_timeout=config.socket_timeout,
            socket_connect_timeout=config.socket_connect_timeout,
            socket_keepalive=config.socket_keepalive,
            socket_keepalive_options=config.socket_keepalive_options,
        )
        
        self.async_pool = aioredis.ConnectionPool.from_url(url, **pool_options)
        self.sync_pool = SyncRedisPool.from_url(url, **pool_options)
        
        # Clients are created lazily on first access.
        self._async_redis = None
        self._sync_redis = None
    
    async def get_async_redis(self):
        """Return the shared async client, creating it on first call."""
        if self._async_redis is not None:
            return self._async_redis
        self._async_redis = aioredis.Redis(connection_pool=self.async_pool)
        return self._async_redis
    
    def get_sync_redis(self):
        """Return the shared sync client, creating it on first call."""
        if self._sync_redis is not None:
            return self._sync_redis
        self._sync_redis = redis.Redis(connection_pool=self.sync_pool)
        return self._sync_redis
    
    @asynccontextmanager
    async def get_async_connection(self):
        """Yield the async client while timing the operation for the monitor."""
        started = time.time()
        client = await self.get_async_redis()
        
        try:
            yield client
        except Exception:
            self.monitor.record_request(time.time() - started, False)
            raise
        else:
            self.monitor.record_request(time.time() - started, True)
    
    @contextmanager
    def get_sync_connection(self):
        """Yield the sync client while timing the operation for the monitor."""
        started = time.time()
        client = self.get_sync_redis()
        
        try:
            yield client
        except Exception:
            self.monitor.record_request(time.time() - started, False)
            raise
        else:
            self.monitor.record_request(time.time() - started, True)
    
    async def get_pool_status(self) -> Dict[str, Any]:
        """Collect server-side stats via INFO and refresh the monitor gauges."""
        try:
            client = await self.get_async_redis()
            info = await client.info()
        except Exception as e:
            logging.error(f"Failed to get Redis pool status: {e}")
            return {}
        
        connected = info.get("connected_clients", 0)
        limit = self.config.max_connections
        status = {
            "connected_clients": connected,
            "max_connections": limit,
            "used_memory_human": info.get("used_memory_human", "0B"),
            "total_commands_processed": info.get("total_commands_processed", 0),
            "keyspace_hits": info.get("keyspace_hits", 0),
            "keyspace_misses": info.get("keyspace_misses", 0),
            "uptime_in_seconds": info.get("uptime_in_seconds", 0)
        }
        
        # Treat the pool limit as "total"; the remainder counts as idle capacity.
        self.monitor.update_pool_stats(limit, connected, limit - connected)
        
        return status
    
    async def health_check(self) -> bool:
        """Return True when PING succeeds."""
        try:
            async with self.get_async_connection() as client:
                await client.ping()
        except Exception as e:
            logging.error(f"Redis health check failed: {e}")
            return False
        return True
    
    async def close(self):
        """Close the lazy clients (if created) and tear down both pools."""
        if self._async_redis:
            await self._async_redis.close()
        if self._sync_redis:
            self._sync_redis.close()
        
        await self.async_pool.aclose()
        self.sync_pool.disconnect()

# ============================================================================
# 5. MongoDB连接池管理器
# ============================================================================

class MongoPoolManager:
    """Manage paired Motor (async) and PyMongo (sync) clients for one database."""
    
    def __init__(self, config: MongoPoolConfig):
        self.config = config
        self.monitor = PoolMonitor("MongoDB")
        
        # Both clients share the same URL and pool tuning options.
        url = config.get_url()
        client_options = dict(
            maxPoolSize=config.max_pool_size,
            minPoolSize=config.min_pool_size,
            maxIdleTimeMS=config.max_idle_time_ms,
            waitQueueTimeoutMS=config.wait_queue_timeout_ms,
            serverSelectionTimeoutMS=config.server_selection_timeout_ms,
        )
        
        self.async_client = motor.motor_asyncio.AsyncIOMotorClient(url, **client_options)
        self.sync_client = MongoClient(url, **client_options)
        
        # Database handles for the configured database.
        self.async_db = self.async_client[config.database]
        self.sync_db = self.sync_client[config.database]
    
    @asynccontextmanager
    async def get_async_database(self):
        """Yield the async database handle while timing the operation."""
        started = time.time()
        
        try:
            yield self.async_db
        except Exception:
            self.monitor.record_request(time.time() - started, False)
            raise
        else:
            self.monitor.record_request(time.time() - started, True)
    
    @contextmanager
    def get_sync_database(self):
        """Yield the sync database handle while timing the operation."""
        started = time.time()
        
        try:
            yield self.sync_db
        except Exception:
            self.monitor.record_request(time.time() - started, False)
            raise
        else:
            self.monitor.record_request(time.time() - started, True)
    
    async def get_pool_status(self) -> Dict[str, Any]:
        """Fetch serverStatus connection counters and refresh the monitor."""
        try:
            server_status = await self.async_db.command("serverStatus")
        except Exception as e:
            logging.error(f"Failed to get MongoDB pool status: {e}")
            return {}
        
        connections = server_status.get("connections", {})
        current = connections.get("current", 0)
        available = connections.get("available", 0)
        
        status = {
            "max_pool_size": self.config.max_pool_size,
            "min_pool_size": self.config.min_pool_size,
            "current_connections": current,
            "available_connections": available,
            "total_created": connections.get("totalCreated", 0),
            "uptime_seconds": server_status.get("uptime", 0)
        }
        
        # active = current minus available, per the server's own accounting.
        self.monitor.update_pool_stats(current, current - available, available)
        
        return status
    
    async def health_check(self) -> bool:
        """Return True when the ping command succeeds."""
        try:
            async with self.get_async_database() as db:
                await db.command("ping")
        except Exception as e:
            logging.error(f"MongoDB health check failed: {e}")
            return False
        return True
    
    def close(self):
        """Close both clients, releasing all pooled connections."""
        self.async_client.close()
        self.sync_client.close()

# ============================================================================
# 6. 连接池管理器统一接口
# ============================================================================

class DatabasePoolManager:
    """Registry that owns every pool manager plus background monitoring tasks."""
    
    def __init__(self):
        # name -> pool manager instance
        self.pools = {}
        # name -> PoolMonitor belonging to that pool
        self.monitors = {}
        self._health_check_task = None
        self._monitoring_task = None
    
    def _register(self, name, manager):
        """Store a pool manager and its monitor under *name*; return the manager."""
        self.pools[name] = manager
        self.monitors[name] = manager.monitor
        return manager
    
    def add_sqlalchemy_pool(self, name: str, config: "DatabasePoolConfig"):
        """Create and register a SQLAlchemy pool manager."""
        return self._register(name, SQLAlchemyPoolManager(config))
    
    def add_redis_pool(self, name: str, config: "RedisPoolConfig"):
        """Create and register a Redis pool manager."""
        return self._register(name, RedisPoolManager(config))
    
    def add_mongo_pool(self, name: str, config: "MongoPoolConfig"):
        """Create and register a MongoDB pool manager."""
        return self._register(name, MongoPoolManager(config))
    
    def get_pool(self, name: str):
        """Return the pool registered under *name*, or None if absent."""
        return self.pools.get(name)
    
    async def get_all_pool_status(self) -> Dict[str, Any]:
        """Collect per-pool status dicts; any failure becomes an error entry."""
        status = {}
        
        for name, pool in self.pools.items():
            getter = getattr(pool, 'get_pool_status', None)
            if getter is None:
                status[name] = {"error": "Pool status not available"}
                continue
            try:
                if asyncio.iscoroutinefunction(getter):
                    status[name] = await getter()
                else:
                    status[name] = getter()
            except Exception as e:
                status[name] = {"error": str(e)}
        
        return status
    
    async def health_check_all(self) -> Dict[str, bool]:
        """Run every pool's health check; missing or failing checks report False."""
        results = {}
        
        for name, pool in self.pools.items():
            healthy = False
            try:
                if hasattr(pool, 'health_check'):
                    healthy = await pool.health_check()
            except Exception as e:
                logging.error(f"Health check failed for {name}: {e}")
                healthy = False
            results[name] = healthy
        
        return results
    
    def get_monitoring_stats(self) -> Dict[str, Any]:
        """Return a snapshot of every monitor's statistics."""
        return {name: monitor.get_stats() for name, monitor in self.monitors.items()}
    
    def start_health_monitoring(self, interval: int = 30):
        """Spawn the periodic health-check task (once) on the running event loop."""
        async def health_monitor():
            while True:
                try:
                    health_status = await self.health_check_all()
                    unhealthy = [name for name, ok in health_status.items() if not ok]
                    
                    if unhealthy:
                        logging.warning(f"Unhealthy pools detected: {unhealthy}")
                    else:
                        logging.info("All pools are healthy")
                except Exception as e:
                    logging.error(f"Health monitoring error: {e}")
                await asyncio.sleep(interval)
        
        if self._health_check_task is None or self._health_check_task.done():
            self._health_check_task = asyncio.create_task(health_monitor())
    
    def start_performance_monitoring(self, interval: int = 60):
        """Spawn the periodic stats-logging task (once) on the running event loop."""
        async def performance_monitor():
            while True:
                try:
                    stats = self.get_monitoring_stats()
                    # Also refreshes each monitor's gauges as a side effect.
                    pool_status = await self.get_all_pool_status()
                    
                    for name, stat in stats.items():
                        if stat["total_requests"] > 0:
                            logging.info(
                                f"Pool {name}: {stat['total_requests']} requests, "
                                f"{stat['success_rate']:.1f}% success rate, "
                                f"{stat['avg_response_time']:.3f}s avg response time"
                            )
                except Exception as e:
                    logging.error(f"Performance monitoring error: {e}")
                await asyncio.sleep(interval)
        
        if self._monitoring_task is None or self._monitoring_task.done():
            self._monitoring_task = asyncio.create_task(performance_monitor())
    
    async def close_all(self):
        """Cancel monitoring tasks, then close every registered pool."""
        for task in (self._health_check_task, self._monitoring_task):
            if task:
                task.cancel()
        
        for name, pool in self.pools.items():
            try:
                closer = getattr(pool, 'close', None)
                if closer is not None:
                    if asyncio.iscoroutinefunction(closer):
                        await closer()
                    else:
                        closer()
                logging.info(f"Closed pool: {name}")
            except Exception as e:
                logging.error(f"Error closing pool {name}: {e}")

# ============================================================================
# 7. 全局连接池管理器实例
# ============================================================================

# Global pool manager shared by the FastAPI dependencies and lifespan hooks.
pool_manager = DatabasePoolManager()

# 配置连接池
def setup_pools():
    """Register the application's default pools and start background monitoring.

    Must run inside an active asyncio event loop (e.g. FastAPI startup),
    because the monitoring helpers call asyncio.create_task().
    """
    
    # PostgreSQL via SQLAlchemy.
    postgres_config = DatabasePoolConfig(
        host="localhost",
        port=5432,
        database="fastapi_db",
        username="postgres",
        password="password",
        pool_size=20,
        max_overflow=30,
        pool_timeout=30,
        pool_recycle=3600,
        pool_pre_ping=True
    )
    pool_manager.add_sqlalchemy_pool("main_db", postgres_config)
    
    # Redis cache.
    cache_config = RedisPoolConfig(
        host="localhost",
        port=6379,
        db=0,
        max_connections=50,
        socket_timeout=5.0
    )
    pool_manager.add_redis_pool("main_redis", cache_config)
    
    # MongoDB document store.
    document_config = MongoPoolConfig(
        host="localhost",
        port=27017,
        database="fastapi_db",
        max_pool_size=100,
        min_pool_size=10
    )
    pool_manager.add_mongo_pool("main_mongo", document_config)
    
    # Background health (30s) and performance (60s) monitoring loops.
    pool_manager.start_health_monitoring(30)
    pool_manager.start_performance_monitoring(60)

# ============================================================================
# 8. FastAPI依赖注入
# ============================================================================

async def get_db_pool():
    """FastAPI dependency: the 'main_db' SQLAlchemy pool manager (None if not set up)."""
    return pool_manager.get_pool("main_db")

async def get_redis_pool():
    """FastAPI dependency: the 'main_redis' Redis pool manager (None if not set up)."""
    return pool_manager.get_pool("main_redis")

async def get_mongo_pool():
    """FastAPI dependency: the 'main_mongo' MongoDB pool manager (None if not set up)."""
    return pool_manager.get_pool("main_mongo")

# ============================================================================
# 9. Pydantic模型
# ============================================================================

class PoolStatusResponse(BaseModel):
    """Response payload describing one pool's status snapshot."""
    pool_name: str  # registry key of the pool
    status: Dict[str, Any]  # raw status dict returned by the pool manager
    healthy: bool  # result of the pool's health check
    timestamp: datetime

class MonitoringStatsResponse(BaseModel):
    """Response payload carrying one pool's request statistics."""
    pool_name: str
    total_requests: int
    successful_requests: int
    failed_requests: int
    success_rate: float  # percentage, 0 when no requests have been recorded
    avg_response_time: float  # seconds, over the recent request window
    max_response_time: float
    min_response_time: float  # 0 is reported when no requests have been recorded
    timestamp: datetime

class SystemResourcesResponse(BaseModel):
    """Response payload with host resource usage sampled via psutil."""
    cpu_percent: float
    memory_percent: float
    disk_usage_percent: float  # usage of the root filesystem
    network_connections: int  # count of open sockets on the host
    timestamp: datetime

# ============================================================================
# 10. FastAPI应用程序
# ============================================================================

@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build pools on startup, dispose them on shutdown."""
    # Startup: register pools and launch the background monitoring tasks.
    print("🚀 启动数据库连接池管理器...")
    setup_pools()
    
    yield
    
    # Shutdown: cancel monitors and close every pool.
    print("🛑 关闭数据库连接池管理器...")
    await pool_manager.close_all()

# FastAPI application, wired to the lifespan handler above.
app = FastAPI(
    title="数据库连接池最佳实践",
    description="展示Python中数据库连接池的最佳实践",
    version="1.0.0",
    lifespan=lifespan
)

# ============================================================================
# 11. API路由
# ============================================================================

@app.get("/")
async def root():
    """Landing page: advertise the demo's features and endpoint map."""
    return {
        "message": "数据库连接池最佳实践示例",
        "features": [
            "SQLAlchemy连接池管理",
            "Redis连接池管理",
            "MongoDB连接池管理",
            "连接池监控",
            "健康检查",
            "性能统计"
        ],
        "endpoints": {
            "pool_status": "/pools/status",
            "health_check": "/pools/health",
            "monitoring": "/pools/monitoring",
            "system_resources": "/system/resources"
        }
    }

@app.get("/pools/status")
async def get_pools_status():
    """Return a status snapshot plus health flag for every registered pool."""
    status = await pool_manager.get_all_pool_status()
    health = await pool_manager.health_check_all()
    
    return [
        PoolStatusResponse(
            pool_name=name,
            status=pool_status,
            healthy=health.get(name, False),
            timestamp=datetime.now()
        )
        for name, pool_status in status.items()
    ]

@app.get("/pools/health")
async def health_check_pools():
    """Health-check every pool and report a per-pool and overall verdict."""
    pools_health = await pool_manager.health_check_all()
    overall = all(pools_health.values())
    
    return {
        "overall_healthy": overall,
        "pools": pools_health,
        "timestamp": datetime.now()
    }

@app.get("/pools/monitoring")
async def get_monitoring_stats():
    """Return every pool monitor's request statistics as response models."""
    stats = pool_manager.get_monitoring_stats()
    
    responses = []
    for name, s in stats.items():
        total = s["total_requests"]
        rate = s["successful_requests"] / total * 100 if total > 0 else 0
        min_rt = s["min_response_time"]
        responses.append(MonitoringStatsResponse(
            pool_name=name,
            total_requests=total,
            successful_requests=s["successful_requests"],
            failed_requests=s["failed_requests"],
            success_rate=rate,
            avg_response_time=s["avg_response_time"],
            max_response_time=s["max_response_time"],
            # inf means "no requests yet"; report 0 instead.
            min_response_time=0 if min_rt == float('inf') else min_rt,
            timestamp=datetime.now()
        ))
    
    return responses

@app.get("/system/resources")
async def get_system_resources():
    """Sample host CPU, memory, disk, and socket usage via psutil."""
    # Note: cpu_percent(interval=1) blocks for one second to take its sample.
    return SystemResourcesResponse(
        cpu_percent=psutil.cpu_percent(interval=1),
        memory_percent=psutil.virtual_memory().percent,
        disk_usage_percent=psutil.disk_usage('/').percent,
        network_connections=len(psutil.net_connections()),
        timestamp=datetime.now()
    )

@app.post("/pools/reset-stats")
async def reset_monitoring_stats():
    """Zero every pool monitor's counters and latency windows."""
    for pool_monitor in pool_manager.monitors.values():
        pool_monitor.reset_stats()
    
    return {
        "message": "监控统计已重置",
        "timestamp": datetime.now()
    }

# 数据库操作示例
@app.get("/demo/database-operations")
async def demo_database_operations(
    db_pool = Depends(get_db_pool),
    redis_pool = Depends(get_redis_pool),
    mongo_pool = Depends(get_mongo_pool)
):
    """Exercise all three pools with one trivial operation each.

    Each backend is tried independently; a failure is reported inside the
    results dict rather than failing the whole request.
    """
    results = {}
    
    # SQLAlchemy: run a literal SELECT through a pooled async session.
    try:
        async with db_pool.get_async_session() as session:
            result = await session.execute(text("SELECT 'SQLAlchemy连接成功' as message"))
            results["sqlalchemy"] = result.scalar()
    except Exception as e:
        results["sqlalchemy"] = f"错误: {str(e)}"
    
    # Redis: round-trip a key through the async client.
    try:
        async with redis_pool.get_async_connection() as redis:
            await redis.set("test_key", "Redis连接成功")
            results["redis"] = await redis.get("test_key")
    except Exception as e:
        results["redis"] = f"错误: {str(e)}"
    
    # MongoDB: insert a document and read it back.
    try:
        async with mongo_pool.get_async_database() as db:
            await db.test_collection.insert_one({"message": "MongoDB连接成功"})
            doc = await db.test_collection.find_one({"message": "MongoDB连接成功"})
            results["mongodb"] = doc["message"] if doc else "未找到文档"
    except Exception as e:
        results["mongodb"] = f"错误: {str(e)}"
    
    return {
        "message": "数据库操作演示完成",
        "results": results,
        "timestamp": datetime.now()
    }

# 性能测试
@app.get("/demo/performance-test")
async def performance_test(
    concurrent_requests: int = 10,
    db_pool = Depends(get_db_pool)
):
    """Run N concurrent ``SELECT pg_sleep(0.1)`` queries and report throughput.

    Args:
        concurrent_requests: number of parallel queries, clamped to 1-100.
        db_pool: SQLAlchemy pool manager injected by FastAPI.

    Raises:
        HTTPException: 400 when concurrent_requests is outside 1-100.
    """
    # BUGFIX: the original only rejected values above 100; zero or negative
    # counts would run an empty batch and divide by a near-zero elapsed time.
    if concurrent_requests < 1:
        raise HTTPException(status_code=400, detail="并发请求数必须至少为1")
    if concurrent_requests > 100:
        raise HTTPException(status_code=400, detail="并发请求数不能超过100")
    
    async def single_request():
        """One pooled async session executing a 0.1s sleep query."""
        async with db_pool.get_async_session() as session:
            result = await session.execute(text("SELECT pg_sleep(0.1), 'test' as message"))
            return result.scalar()
    
    start_time = time.time()
    
    # return_exceptions=True so one failed query does not abort the batch.
    tasks = [single_request() for _ in range(concurrent_requests)]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    
    elapsed = time.time() - start_time
    
    successful_requests = sum(1 for r in results if not isinstance(r, Exception))
    failed_requests = len(results) - successful_requests
    
    return {
        "message": "性能测试完成",
        "concurrent_requests": concurrent_requests,
        "successful_requests": successful_requests,
        "failed_requests": failed_requests,
        "total_time_seconds": round(elapsed, 3),
        "requests_per_second": round(concurrent_requests / elapsed, 2),
        "pool_status": db_pool.get_pool_status(),
        "timestamp": datetime.now()
    }

if __name__ == "__main__":
    import uvicorn
    
    # Development entry point: serve on port 8000 with auto-reload.
    print("🚀 启动数据库连接池最佳实践示例...")
    print("📚 API文档: http://localhost:8000/docs")
    
    uvicorn.run(
        "06_database_pool_best_practices:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        log_level="info"
    )

# ============================================================================
# 12. 连接池最佳实践总结
# ============================================================================

"""
数据库连接池最佳实践总结:

1. 连接池配置原则:
   - pool_size: 根据应用并发量设置，通常为CPU核心数的2-4倍
   - max_overflow: 设置为pool_size的1-2倍
   - pool_timeout: 设置合理的超时时间，避免长时间等待
   - pool_recycle: 定期回收连接，避免长连接问题
   - pool_pre_ping: 启用连接前检查，确保连接有效性

2. 监控和告警:
   - 监控连接池使用率
   - 监控连接获取时间
   - 监控连接失败率
   - 设置告警阈值

3. 性能优化:
   - 合理设置连接池大小
   - 使用连接池预热
   - 避免长事务
   - 及时释放连接

4. 错误处理:
   - 实现连接重试机制
   - 处理连接超时
   - 实现熔断器模式
   - 记录详细错误日志

5. 安全考虑:
   - 使用连接加密
   - 限制连接权限
   - 定期更新密码
   - 监控异常连接

6. 多数据库管理:
   - 统一连接池管理
   - 分离读写连接池
   - 实现数据库路由
   - 支持动态配置

7. 测试和验证:
   - 压力测试
   - 连接泄漏测试
   - 故障恢复测试
   - 性能基准测试

8. 生产环境部署:
   - 环境隔离
   - 配置管理
   - 日志收集
   - 监控告警
"""