"""
批量操作优化器模块

提供高性能批量数据操作功能，包括：
- 批量插入优化
- 批量更新优化  
- 批量删除优化
- 批量upsert操作
- 事务管理
- 性能监控
- 错误处理和重试机制
"""

import asyncio
import json
import logging
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import (
    Any, Dict, List, Optional, Union, Tuple,
    TypeVar, Generic, Callable, Set
)

from sqlalchemy import (
    text, select, update, delete, insert,
    Table, MetaData, Column, Integer, String, DateTime,
    Boolean, Float, Text, Index, UniqueConstraint
)
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.exc import SQLAlchemyError, IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.sql import ClauseElement

from .config import config
from .database import db_manager
from .cache import cache_manager

# 类型变量
ModelType = TypeVar('ModelType', bound=DeclarativeBase)

logger = logging.getLogger(__name__)


class BatchOperationType(Enum):
    """Kinds of batch operations, recorded in BatchMetrics.operation_type."""
    INSERT = "insert"
    UPDATE = "update"
    DELETE = "delete"
    UPSERT = "upsert"
    BULK_INSERT = "bulk_insert"
    BULK_UPDATE = "bulk_update"


class BatchStrategy(Enum):
    """How a batch job is split up and executed."""
    SEQUENTIAL = "sequential"  # single multi-row statement / loop, one transaction
    PARALLEL = "parallel"     # concurrent asyncio tasks, one session per chunk
    CHUNKED = "chunked"       # fixed-size chunks inside one session
    STREAMING = "streaming"   # row-at-a-time (or COPY-based) streaming insert


@dataclass
class BatchConfig:
    """Tuning knobs for batch operations."""
    batch_size: int = 1000  # records per task in PARALLEL strategies
    max_workers: int = 4  # NOTE(review): not referenced in visible code — confirm usage
    chunk_size: int = 100  # records per chunk in CHUNKED strategies
    timeout: int = 300  # seconds; presumably an operation deadline — confirm where enforced
    retry_attempts: int = 3  # retry settings — consumer not visible in this file
    retry_delay: float = 1.0
    enable_monitoring: bool = True
    enable_caching: bool = True  # when True, caches are invalidated after writes
    transaction_isolation: str = "READ_COMMITTED"

    # Performance optimization settings (PostgreSQL-oriented)
    use_copy: bool = True  # use the COPY command for streaming inserts (see _streaming_insert)
    disable_triggers: bool = False
    disable_constraints: bool = False
    vacuum_after: bool = False
    analyze_after: bool = True


@dataclass
class BatchMetrics:
    """Performance counters collected for a single batch operation."""
    operation_type: BatchOperationType
    total_records: int = 0
    processed_records: int = 0
    failed_records: int = 0
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    duration: Optional[float] = None
    throughput: Optional[float] = None  # records per second
    error_rate: Optional[float] = None
    memory_usage: Optional[float] = None
    cpu_usage: Optional[float] = None
    errors: List[str] = field(default_factory=list)

    def calculate_metrics(self):
        """Derive duration, throughput and error rate from the raw counters."""
        if self.start_time is not None and self.end_time is not None:
            elapsed = (self.end_time - self.start_time).total_seconds()
            self.duration = elapsed
            if elapsed > 0:
                self.throughput = self.processed_records / elapsed

        if self.total_records > 0:
            self.error_rate = self.failed_records / self.total_records


@dataclass
class BatchResult:
    """Outcome of one batch operation."""
    success: bool  # True only when no per-chunk/per-record errors occurred
    metrics: BatchMetrics
    affected_rows: int = 0
    inserted_ids: List[Any] = field(default_factory=list)  # populated when the driver exposes inserted PKs
    errors: List[Exception] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)


class BatchOptimizer(Generic[ModelType]):
    """批量操作优化器"""
    
    def __init__(
        self,
        model_class: type[ModelType],
        config: Optional[BatchConfig] = None
    ):
        """Bind the optimizer to one model's table, session factory and cache."""
        self.model_class = model_class
        self.config = BatchConfig() if config is None else config
        self.table = model_class.__table__
        self.session_factory = db_manager.get_session
        self.cache = cache_manager.get_cache()

        # Monitoring state: completed-operation metrics and in-flight op ids.
        self._metrics_history: List[BatchMetrics] = []
        self._active_operations: Set[str] = set()
        
    async def batch_insert(
        self,
        records: List[Dict[str, Any]],
        strategy: BatchStrategy = BatchStrategy.CHUNKED,
        on_conflict: str = "ignore"  # ignore, update, error
    ) -> BatchResult:
        """Bulk-insert *records* using the selected batching strategy.

        Args:
            records: Row dicts keyed by column name.
            strategy: CHUNKED, PARALLEL, STREAMING, or sequential fallback.
            on_conflict: "ignore" skips conflicting rows, "update" overwrites
                them, any other value lets the conflict raise.

        Returns:
            BatchResult with success flag, metrics, affected rows and errors.
        """
        metrics = BatchMetrics(
            operation_type=BatchOperationType.INSERT,
            total_records=len(records),
            # timezone-aware replacement for the deprecated datetime.utcnow()
            start_time=datetime.now(timezone.utc)
        )

        try:
            if strategy == BatchStrategy.CHUNKED:
                result = await self._chunked_insert(records, metrics, on_conflict)
            elif strategy == BatchStrategy.PARALLEL:
                result = await self._parallel_insert(records, metrics, on_conflict)
            elif strategy == BatchStrategy.STREAMING:
                result = await self._streaming_insert(records, metrics, on_conflict)
            else:
                result = await self._sequential_insert(records, metrics, on_conflict)

            return result

        except Exception as e:
            logger.error(f"批量插入失败: {e}")
            metrics.errors.append(str(e))
            return BatchResult(
                success=False,
                metrics=metrics,
                errors=[e]
            )
        finally:
            # Metrics are finalized and recorded on success and failure alike.
            metrics.end_time = datetime.now(timezone.utc)
            metrics.calculate_metrics()
            self._metrics_history.append(metrics)
    
    async def batch_update(
        self,
        updates: List[Dict[str, Any]],
        where_columns: List[str],
        strategy: BatchStrategy = BatchStrategy.CHUNKED
    ) -> BatchResult:
        """Bulk-update rows using the selected batching strategy.

        Args:
            updates: Row dicts; the ``where_columns`` keys locate each row,
                the remaining keys become the new column values.
            where_columns: Column names that form the WHERE clause.
            strategy: CHUNKED, PARALLEL, or sequential fallback.

        Returns:
            BatchResult with success flag, metrics and affected row count.
        """
        metrics = BatchMetrics(
            operation_type=BatchOperationType.UPDATE,
            total_records=len(updates),
            # timezone-aware replacement for the deprecated datetime.utcnow()
            start_time=datetime.now(timezone.utc)
        )

        try:
            if strategy == BatchStrategy.CHUNKED:
                result = await self._chunked_update(updates, where_columns, metrics)
            elif strategy == BatchStrategy.PARALLEL:
                result = await self._parallel_update(updates, where_columns, metrics)
            else:
                result = await self._sequential_update(updates, where_columns, metrics)

            return result

        except Exception as e:
            logger.error(f"批量更新失败: {e}")
            metrics.errors.append(str(e))
            return BatchResult(
                success=False,
                metrics=metrics,
                errors=[e]
            )
        finally:
            # Metrics are finalized and recorded on success and failure alike.
            metrics.end_time = datetime.now(timezone.utc)
            metrics.calculate_metrics()
            self._metrics_history.append(metrics)
    
    async def batch_upsert(
        self,
        records: List[Dict[str, Any]],
        conflict_columns: List[str],
        update_columns: Optional[List[str]] = None,
        strategy: BatchStrategy = BatchStrategy.CHUNKED
    ) -> BatchResult:
        """Bulk upsert via PostgreSQL ``INSERT ... ON CONFLICT DO UPDATE``.

        Args:
            records: Row dicts keyed by column name.
            conflict_columns: Columns forming the conflict target (unique index).
            update_columns: Columns to overwrite on conflict; when None, all
                columns except the conflict target and ``created_at``.
            strategy: CHUNKED, PARALLEL, or sequential fallback.

        Returns:
            BatchResult with success flag, metrics and affected row count.
        """
        metrics = BatchMetrics(
            operation_type=BatchOperationType.UPSERT,
            total_records=len(records),
            # timezone-aware replacement for the deprecated datetime.utcnow()
            start_time=datetime.now(timezone.utc)
        )

        try:
            if strategy == BatchStrategy.CHUNKED:
                result = await self._chunked_upsert(
                    records, conflict_columns, update_columns, metrics
                )
            elif strategy == BatchStrategy.PARALLEL:
                result = await self._parallel_upsert(
                    records, conflict_columns, update_columns, metrics
                )
            else:
                result = await self._sequential_upsert(
                    records, conflict_columns, update_columns, metrics
                )

            return result

        except Exception as e:
            logger.error(f"批量upsert失败: {e}")
            metrics.errors.append(str(e))
            return BatchResult(
                success=False,
                metrics=metrics,
                errors=[e]
            )
        finally:
            # Metrics are finalized and recorded on success and failure alike.
            metrics.end_time = datetime.now(timezone.utc)
            metrics.calculate_metrics()
            self._metrics_history.append(metrics)
    
    async def batch_delete(
        self,
        conditions: List[Dict[str, Any]],
        strategy: BatchStrategy = BatchStrategy.CHUNKED
    ) -> BatchResult:
        """Bulk-delete rows matching each condition dict.

        Args:
            conditions: One dict per delete; each key/value pair becomes an
                equality predicate ANDed into that delete's WHERE clause.
            strategy: CHUNKED, PARALLEL, or sequential fallback.

        Returns:
            BatchResult with success flag, metrics and affected row count.
        """
        metrics = BatchMetrics(
            operation_type=BatchOperationType.DELETE,
            total_records=len(conditions),
            # timezone-aware replacement for the deprecated datetime.utcnow()
            start_time=datetime.now(timezone.utc)
        )

        try:
            if strategy == BatchStrategy.CHUNKED:
                result = await self._chunked_delete(conditions, metrics)
            elif strategy == BatchStrategy.PARALLEL:
                result = await self._parallel_delete(conditions, metrics)
            else:
                result = await self._sequential_delete(conditions, metrics)

            return result

        except Exception as e:
            logger.error(f"批量删除失败: {e}")
            metrics.errors.append(str(e))
            return BatchResult(
                success=False,
                metrics=metrics,
                errors=[e]
            )
        finally:
            # Metrics are finalized and recorded on success and failure alike.
            metrics.end_time = datetime.now(timezone.utc)
            metrics.calculate_metrics()
            self._metrics_history.append(metrics)
    
    # 分块处理实现
    async def _chunked_insert(
        self,
        records: List[Dict[str, Any]],
        metrics: BatchMetrics,
        on_conflict: str
    ) -> BatchResult:
        """Insert records chunk by chunk inside one session.

        Each chunk runs inside a SAVEPOINT (``begin_nested``): without it, the
        first failing chunk leaves the session's transaction in a failed state
        and every subsequent chunk would also error out, defeating the
        catch-and-continue loop below.
        """
        chunks = self._create_chunks(records, self.config.chunk_size)
        total_affected = 0
        all_inserted_ids = []
        errors = []

        async with self.session_factory() as session:
            try:
                for i, chunk in enumerate(chunks):
                    try:
                        async with session.begin_nested():
                            if on_conflict == "ignore":
                                stmt = pg_insert(self.table).values(chunk)
                                stmt = stmt.on_conflict_do_nothing()
                            elif on_conflict == "update":
                                stmt = pg_insert(self.table).values(chunk)
                                # Overwrite everything except immutable columns.
                                # NOTE(review): conflict target hard-coded to
                                # ['id'] — confirm all models have an 'id' PK.
                                update_dict = {
                                    col.name: stmt.excluded[col.name]
                                    for col in self.table.columns
                                    if col.name not in ['id', 'created_at']
                                }
                                stmt = stmt.on_conflict_do_update(
                                    index_elements=['id'],
                                    set_=update_dict
                                )
                            else:
                                stmt = insert(self.table).values(chunk)

                            result = await session.execute(stmt)

                        affected = result.rowcount
                        total_affected += affected
                        metrics.processed_records += len(chunk)

                        # Inserted PKs are only exposed for some statements.
                        if hasattr(result, 'inserted_primary_key_rows'):
                            all_inserted_ids.extend(result.inserted_primary_key_rows)

                        logger.debug(f"分块 {i+1}/{len(chunks)} 插入完成，影响行数: {affected}")

                    except Exception as e:
                        # The savepoint rolled this chunk back; session stays usable.
                        logger.error(f"分块 {i+1} 插入失败: {e}")
                        errors.append(e)
                        metrics.failed_records += len(chunk)

                await session.commit()

                # Drop cached query results that may now be stale.
                if self.config.enable_caching:
                    await self._invalidate_cache()

                return BatchResult(
                    success=len(errors) == 0,
                    metrics=metrics,
                    affected_rows=total_affected,
                    inserted_ids=all_inserted_ids,
                    errors=errors
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _chunked_update(
        self,
        updates: List[Dict[str, Any]],
        where_columns: List[str],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Update records chunk by chunk inside one session.

        Each chunk runs inside a SAVEPOINT so a failing chunk rolls back
        atomically and leaves the session usable for the remaining chunks
        (previously the first failure poisoned the whole transaction).
        Rows are updated one statement per record within the chunk.
        """
        chunks = self._create_chunks(updates, self.config.chunk_size)
        total_affected = 0
        errors = []

        async with self.session_factory() as session:
            try:
                for i, chunk in enumerate(chunks):
                    try:
                        chunk_affected = 0
                        async with session.begin_nested():
                            for record in chunk:
                                # WHERE columns locate the row; the rest are new values.
                                where_clause = {
                                    col: record[col] for col in where_columns
                                }
                                update_data = {
                                    k: v for k, v in record.items()
                                    if k not in where_columns
                                }

                                stmt = update(self.table).where(
                                    *[getattr(self.table.c, k) == v
                                      for k, v in where_clause.items()]
                                ).values(**update_data)

                                result = await session.execute(stmt)
                                chunk_affected += result.rowcount

                        # Only count rows from chunks that fully succeeded.
                        total_affected += chunk_affected
                        metrics.processed_records += len(chunk)
                        logger.debug(f"分块 {i+1}/{len(chunks)} 更新完成")

                    except Exception as e:
                        logger.error(f"分块 {i+1} 更新失败: {e}")
                        errors.append(e)
                        metrics.failed_records += len(chunk)

                await session.commit()

                # Drop cached query results that may now be stale.
                if self.config.enable_caching:
                    await self._invalidate_cache()

                return BatchResult(
                    success=len(errors) == 0,
                    metrics=metrics,
                    affected_rows=total_affected,
                    errors=errors
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _chunked_upsert(
        self,
        records: List[Dict[str, Any]],
        conflict_columns: List[str],
        update_columns: Optional[List[str]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Chunked PostgreSQL upsert (``INSERT ... ON CONFLICT DO UPDATE``).

        Each chunk runs inside a SAVEPOINT so a failed chunk only rolls back
        itself and the session stays usable for the chunks after it.
        """
        chunks = self._create_chunks(records, self.config.chunk_size)
        total_affected = 0
        errors = []

        async with self.session_factory() as session:
            try:
                for i, chunk in enumerate(chunks):
                    try:
                        async with session.begin_nested():
                            stmt = pg_insert(self.table).values(chunk)

                            # Columns to overwrite on conflict: the explicit list
                            # if given, otherwise everything except the conflict
                            # target and created_at.
                            if update_columns:
                                update_dict = {
                                    col: stmt.excluded[col]
                                    for col in update_columns
                                }
                            else:
                                update_dict = {
                                    col.name: stmt.excluded[col.name]
                                    for col in self.table.columns
                                    if col.name not in conflict_columns + ['created_at']
                                }

                            stmt = stmt.on_conflict_do_update(
                                index_elements=conflict_columns,
                                set_=update_dict
                            )

                            result = await session.execute(stmt)

                        total_affected += result.rowcount
                        metrics.processed_records += len(chunk)

                        logger.debug(f"分块 {i+1}/{len(chunks)} upsert完成")

                    except Exception as e:
                        logger.error(f"分块 {i+1} upsert失败: {e}")
                        errors.append(e)
                        metrics.failed_records += len(chunk)

                await session.commit()

                # Drop cached query results that may now be stale.
                if self.config.enable_caching:
                    await self._invalidate_cache()

                return BatchResult(
                    success=len(errors) == 0,
                    metrics=metrics,
                    affected_rows=total_affected,
                    errors=errors
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _chunked_delete(
        self,
        conditions: List[Dict[str, Any]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Delete records chunk by chunk inside one session.

        Each chunk runs inside a SAVEPOINT so a failing chunk rolls back
        atomically without invalidating the session for later chunks.
        """
        chunks = self._create_chunks(conditions, self.config.chunk_size)
        total_affected = 0
        errors = []

        async with self.session_factory() as session:
            try:
                for i, chunk in enumerate(chunks):
                    try:
                        chunk_affected = 0
                        async with session.begin_nested():
                            for condition in chunk:
                                # Each key/value pair is an equality predicate.
                                stmt = delete(self.table).where(
                                    *[getattr(self.table.c, k) == v
                                      for k, v in condition.items()]
                                )

                                result = await session.execute(stmt)
                                chunk_affected += result.rowcount

                        # Only count rows from chunks that fully succeeded.
                        total_affected += chunk_affected
                        metrics.processed_records += len(chunk)
                        logger.debug(f"分块 {i+1}/{len(chunks)} 删除完成")

                    except Exception as e:
                        logger.error(f"分块 {i+1} 删除失败: {e}")
                        errors.append(e)
                        metrics.failed_records += len(chunk)

                await session.commit()

                # Drop cached query results that may now be stale.
                if self.config.enable_caching:
                    await self._invalidate_cache()

                return BatchResult(
                    success=len(errors) == 0,
                    metrics=metrics,
                    affected_rows=total_affected,
                    errors=errors
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    # 并行处理实现
    async def _parallel_insert(
        self,
        records: List[Dict[str, Any]],
        metrics: BatchMetrics,
        on_conflict: str
    ) -> BatchResult:
        """Run insert chunks concurrently, one session per chunk."""
        chunks = self._create_chunks(records, self.config.batch_size)

        tasks = [
            asyncio.create_task(self._insert_chunk(chunk, on_conflict))
            for chunk in chunks
        ]

        # return_exceptions=True: one failed chunk must not cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        total_affected = 0
        all_inserted_ids = []
        errors = []

        # zip keeps each result paired with its source chunk so failure
        # counts use the real chunk size — the last chunk is usually smaller
        # than config.batch_size, which the old flat increment overstated.
        for chunk, result in zip(chunks, results):
            if isinstance(result, Exception):
                errors.append(result)
                metrics.failed_records += len(chunk)
            else:
                total_affected += result.get('affected_rows', 0)
                all_inserted_ids.extend(result.get('inserted_ids', []))
                metrics.processed_records += result.get('processed_count', 0)

        return BatchResult(
            success=len(errors) == 0,
            metrics=metrics,
            affected_rows=total_affected,
            inserted_ids=all_inserted_ids,
            errors=errors
        )
    
    async def _parallel_update(
        self,
        updates: List[Dict[str, Any]],
        where_columns: List[str],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Run update chunks concurrently, one session per chunk."""
        chunks = self._create_chunks(updates, self.config.batch_size)

        tasks = [
            asyncio.create_task(self._update_chunk(chunk, where_columns))
            for chunk in chunks
        ]

        # return_exceptions=True: one failed chunk must not cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        total_affected = 0
        errors = []

        # Pair results with their chunks so failures count the real chunk
        # size (the final chunk is usually smaller than config.batch_size).
        for chunk, result in zip(chunks, results):
            if isinstance(result, Exception):
                errors.append(result)
                metrics.failed_records += len(chunk)
            else:
                total_affected += result.get('affected_rows', 0)
                metrics.processed_records += result.get('processed_count', 0)

        return BatchResult(
            success=len(errors) == 0,
            metrics=metrics,
            affected_rows=total_affected,
            errors=errors
        )
    
    async def _parallel_upsert(
        self,
        records: List[Dict[str, Any]],
        conflict_columns: List[str],
        update_columns: Optional[List[str]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Run upsert chunks concurrently, one session per chunk."""
        chunks = self._create_chunks(records, self.config.batch_size)

        tasks = [
            asyncio.create_task(
                self._upsert_chunk(chunk, conflict_columns, update_columns)
            )
            for chunk in chunks
        ]

        # return_exceptions=True: one failed chunk must not cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        total_affected = 0
        errors = []

        # Pair results with their chunks so failures count the real chunk
        # size (the final chunk is usually smaller than config.batch_size).
        for chunk, result in zip(chunks, results):
            if isinstance(result, Exception):
                errors.append(result)
                metrics.failed_records += len(chunk)
            else:
                total_affected += result.get('affected_rows', 0)
                metrics.processed_records += result.get('processed_count', 0)

        return BatchResult(
            success=len(errors) == 0,
            metrics=metrics,
            affected_rows=total_affected,
            errors=errors
        )
    
    async def _parallel_delete(
        self,
        conditions: List[Dict[str, Any]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Run delete chunks concurrently, one session per chunk."""
        chunks = self._create_chunks(conditions, self.config.batch_size)

        tasks = [
            asyncio.create_task(self._delete_chunk(chunk))
            for chunk in chunks
        ]

        # return_exceptions=True: one failed chunk must not cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        total_affected = 0
        errors = []

        # Pair results with their chunks so failures count the real chunk
        # size (the final chunk is usually smaller than config.batch_size).
        for chunk, result in zip(chunks, results):
            if isinstance(result, Exception):
                errors.append(result)
                metrics.failed_records += len(chunk)
            else:
                total_affected += result.get('affected_rows', 0)
                metrics.processed_records += result.get('processed_count', 0)

        return BatchResult(
            success=len(errors) == 0,
            metrics=metrics,
            affected_rows=total_affected,
            errors=errors
        )
    
    # 流式处理实现
    async def _streaming_insert(
        self,
        records: List[Dict[str, Any]],
        metrics: BatchMetrics,
        on_conflict: str
    ) -> BatchResult:
        """Stream records into the table one at a time (or via COPY).

        When ``use_copy`` is set and conflicts are ignored, defers to
        ``self._copy_insert`` (defined elsewhere in this class; presumably
        wraps PostgreSQL COPY — confirm).  Otherwise each record runs inside
        its own SAVEPOINT so one bad row doesn't leave the session in a
        failed state for the rows after it.
        """
        total_affected = 0
        errors = []

        async with self.session_factory() as session:
            try:
                if self.config.use_copy and on_conflict == "ignore":
                    result = await self._copy_insert(session, records)
                    total_affected = result.get('affected_rows', 0)
                    metrics.processed_records = len(records)
                else:
                    # Row-at-a-time fallback.
                    for record in records:
                        try:
                            async with session.begin_nested():
                                if on_conflict == "ignore":
                                    stmt = pg_insert(self.table).values(record)
                                    stmt = stmt.on_conflict_do_nothing()
                                elif on_conflict == "update":
                                    stmt = pg_insert(self.table).values(record)
                                    update_dict = {
                                        col.name: stmt.excluded[col.name]
                                        for col in self.table.columns
                                        if col.name not in ['id', 'created_at']
                                    }
                                    stmt = stmt.on_conflict_do_update(
                                        index_elements=['id'],
                                        set_=update_dict
                                    )
                                else:
                                    stmt = insert(self.table).values(record)

                                result = await session.execute(stmt)

                            total_affected += result.rowcount
                            metrics.processed_records += 1

                        except Exception as e:
                            # Savepoint rolled the record back; keep streaming.
                            logger.error(f"流式插入记录失败: {e}")
                            errors.append(e)
                            metrics.failed_records += 1

                await session.commit()

                return BatchResult(
                    success=len(errors) == 0,
                    metrics=metrics,
                    affected_rows=total_affected,
                    errors=errors
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    # 顺序处理实现
    async def _sequential_insert(
        self,
        records: List[Dict[str, Any]],
        metrics: BatchMetrics,
        on_conflict: str
    ) -> BatchResult:
        """Insert all records with a single multi-row statement.

        All-or-nothing: any failure rolls the whole batch back and
        re-raises for the public ``batch_insert`` wrapper to report.
        """
        async with self.session_factory() as session:
            try:
                if on_conflict == "ignore":
                    stmt = pg_insert(self.table).values(records)
                    stmt = stmt.on_conflict_do_nothing()
                elif on_conflict == "update":
                    stmt = pg_insert(self.table).values(records)
                    # NOTE(review): conflict target hard-coded to ['id'] —
                    # confirm every model using this path has an 'id' PK.
                    update_dict = {
                        col.name: stmt.excluded[col.name]
                        for col in self.table.columns
                        if col.name not in ['id', 'created_at']
                    }
                    stmt = stmt.on_conflict_do_update(
                        index_elements=['id'],
                        set_=update_dict
                    )
                else:
                    stmt = insert(self.table).values(records)

                result = await session.execute(stmt)
                await session.commit()

                metrics.processed_records = len(records)

                return BatchResult(
                    success=True,
                    metrics=metrics,
                    affected_rows=result.rowcount
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _sequential_update(
        self,
        updates: List[Dict[str, Any]],
        where_columns: List[str],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Update records one statement at a time in a single transaction.

        All-or-nothing: any failure rolls everything back and re-raises.
        """
        total_affected = 0

        async with self.session_factory() as session:
            try:
                for record in updates:
                    # WHERE columns locate the row; the rest are new values.
                    where_clause = {
                        col: record[col] for col in where_columns
                    }
                    update_data = {
                        k: v for k, v in record.items()
                        if k not in where_columns
                    }

                    stmt = update(self.table).where(
                        *[getattr(self.table.c, k) == v
                          for k, v in where_clause.items()]
                    ).values(**update_data)

                    result = await session.execute(stmt)
                    total_affected += result.rowcount

                await session.commit()
                metrics.processed_records = len(updates)

                return BatchResult(
                    success=True,
                    metrics=metrics,
                    affected_rows=total_affected
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _sequential_upsert(
        self,
        records: List[Dict[str, Any]],
        conflict_columns: List[str],
        update_columns: Optional[List[str]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Upsert all records with a single ON CONFLICT statement.

        All-or-nothing: any failure rolls everything back and re-raises.
        """
        async with self.session_factory() as session:
            try:
                stmt = pg_insert(self.table).values(records)

                # Columns to overwrite on conflict: the explicit list if given,
                # otherwise everything except the conflict target and created_at.
                if update_columns:
                    update_dict = {
                        col: stmt.excluded[col]
                        for col in update_columns
                    }
                else:
                    update_dict = {
                        col.name: stmt.excluded[col.name]
                        for col in self.table.columns
                        if col.name not in conflict_columns + ['created_at']
                    }

                stmt = stmt.on_conflict_do_update(
                    index_elements=conflict_columns,
                    set_=update_dict
                )

                result = await session.execute(stmt)
                await session.commit()

                metrics.processed_records = len(records)

                return BatchResult(
                    success=True,
                    metrics=metrics,
                    affected_rows=result.rowcount
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _sequential_delete(
        self,
        conditions: List[Dict[str, Any]],
        metrics: BatchMetrics
    ) -> BatchResult:
        """Delete records one statement at a time in a single transaction.

        All-or-nothing: any failure rolls everything back and re-raises.
        """
        total_affected = 0

        async with self.session_factory() as session:
            try:
                for condition in conditions:
                    # Each key/value pair is an ANDed equality predicate.
                    stmt = delete(self.table).where(
                        *[getattr(self.table.c, k) == v
                          for k, v in condition.items()]
                    )

                    result = await session.execute(stmt)
                    total_affected += result.rowcount

                await session.commit()
                metrics.processed_records = len(conditions)

                return BatchResult(
                    success=True,
                    metrics=metrics,
                    affected_rows=total_affected
                )

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    # 辅助方法
    async def _insert_chunk(
        self,
        chunk: List[Dict[str, Any]],
        on_conflict: str
    ) -> Dict[str, Any]:
        """Insert one chunk in its own session (worker for parallel insert).

        Returns a summary dict: affected_rows, processed_count, inserted_ids.
        Raises on failure so the gathering caller records the error.
        """
        async with self.session_factory() as session:
            try:
                if on_conflict == "ignore":
                    stmt = pg_insert(self.table).values(chunk)
                    stmt = stmt.on_conflict_do_nothing()
                elif on_conflict == "update":
                    stmt = pg_insert(self.table).values(chunk)
                    # NOTE(review): conflict target hard-coded to ['id'] —
                    # confirm every model using this path has an 'id' PK.
                    update_dict = {
                        col.name: stmt.excluded[col.name]
                        for col in self.table.columns
                        if col.name not in ['id', 'created_at']
                    }
                    stmt = stmt.on_conflict_do_update(
                        index_elements=['id'],
                        set_=update_dict
                    )
                else:
                    stmt = insert(self.table).values(chunk)

                result = await session.execute(stmt)
                await session.commit()

                return {
                    'affected_rows': result.rowcount,
                    'processed_count': len(chunk),
                    # Inserted PKs are only exposed for some statements.
                    'inserted_ids': getattr(result, 'inserted_primary_key_rows', [])
                }

            except Exception:
                await session.rollback()
                raise  # bare raise preserves the original traceback
    
    async def _update_chunk(
        self,
        chunk: List[Dict[str, Any]],
        where_columns: List[str]
    ) -> Dict[str, Any]:
        """Update a single chunk of records, one UPDATE per record.

        Args:
            chunk: Records containing both the key columns and new values;
                every record must contain all of `where_columns`.
            where_columns: Column names used to match the target row
                (combined with AND); these are excluded from the SET clause.

        Returns:
            Dict with 'affected_rows' and 'processed_count'.

        Raises:
            ValueError: If a record carries no columns beyond
                `where_columns` (nothing to update).
        """
        async with self.session_factory() as session:
            try:
                total_affected = 0

                for record in chunk:
                    where_clause = {
                        col: record[col] for col in where_columns
                    }
                    update_data = {
                        k: v for k, v in record.items()
                        if k not in where_columns
                    }
                    # An empty values() clause triggers an opaque SQLAlchemy
                    # error mid-transaction — fail early and explicitly.
                    if not update_data:
                        raise ValueError(
                            f"Record has no updatable columns: {record}"
                        )

                    stmt = update(self.table).where(
                        *[getattr(self.table.c, k) == v
                          for k, v in where_clause.items()]
                    ).values(**update_data)

                    result = await session.execute(stmt)
                    total_affected += result.rowcount

                await session.commit()

                return {
                    'affected_rows': total_affected,
                    'processed_count': len(chunk)
                }

            except Exception:
                await session.rollback()
                # Bare raise preserves the original traceback (vs `raise e`).
                raise
    
    async def _upsert_chunk(
        self,
        chunk: List[Dict[str, Any]],
        conflict_columns: List[str],
        update_columns: Optional[List[str]]
    ) -> Dict[str, Any]:
        """Upsert a single chunk via INSERT ... ON CONFLICT DO UPDATE.

        Args:
            chunk: Records to upsert.
            conflict_columns: Columns forming the conflict target; must be
                covered by a unique index or constraint on the table.
            update_columns: Columns to overwrite on conflict. When None,
                every column except the conflict columns and 'created_at'
                is updated.

        Returns:
            Dict with 'affected_rows' and 'processed_count'.
        """
        async with self.session_factory() as session:
            try:
                stmt = pg_insert(self.table).values(chunk)

                if update_columns:
                    update_dict = {
                        col: stmt.excluded[col]
                        for col in update_columns
                    }
                else:
                    # Exclude 'created_at' so upserts never rewrite the
                    # original creation timestamp.
                    update_dict = {
                        col.name: stmt.excluded[col.name]
                        for col in self.table.columns
                        if col.name not in conflict_columns + ['created_at']
                    }

                stmt = stmt.on_conflict_do_update(
                    index_elements=conflict_columns,
                    set_=update_dict
                )

                result = await session.execute(stmt)
                await session.commit()

                return {
                    'affected_rows': result.rowcount,
                    'processed_count': len(chunk)
                }

            except Exception:
                await session.rollback()
                # Bare raise preserves the original traceback (vs `raise e`).
                raise
    
    async def _delete_chunk(
        self,
        chunk: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Delete a single chunk, one DELETE statement per condition dict.

        Args:
            chunk: Condition dicts; keys are column names, values the
                values to match (combined with AND).

        Returns:
            Dict with 'affected_rows' and 'processed_count'.

        Raises:
            ValueError: If a condition dict is empty (it would produce an
                unconstrained DELETE and wipe the entire table).
        """
        async with self.session_factory() as session:
            try:
                total_affected = 0

                for condition in chunk:
                    # Guard against accidental full-table deletes from an
                    # empty condition dict.
                    if not condition:
                        raise ValueError(
                            "Empty delete condition is not allowed"
                        )

                    stmt = delete(self.table).where(
                        *[getattr(self.table.c, k) == v
                          for k, v in condition.items()]
                    )

                    result = await session.execute(stmt)
                    total_affected += result.rowcount

                await session.commit()

                return {
                    'affected_rows': total_affected,
                    'processed_count': len(chunk)
                }

            except Exception:
                await session.rollback()
                # Bare raise preserves the original traceback (vs `raise e`).
                raise
    
    async def _copy_insert(
        self,
        session: AsyncSession,
        records: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """High-performance bulk insert using the PostgreSQL COPY command.

        Serializes `records` to CSV in memory and streams it to the server.

        Args:
            session: Active async session providing the raw connection.
            records: Rows to insert; missing columns are filled with None,
                which PostgreSQL's CSV mode reads back as NULL (an unquoted
                empty field).

        Returns:
            Dict with 'affected_rows' and 'processed_count'.
        """
        try:
            # Column order must match between the CSV payload and the
            # column list in the COPY statement.
            columns = [col.name for col in self.table.columns]

            import io
            import csv

            output = io.StringIO()
            writer = csv.DictWriter(output, fieldnames=columns)

            for record in records:
                # Fill missing columns with None so every row has the full
                # column set in the declared order.
                writer.writerow({col: record.get(col) for col in columns})

            # Rewind so the payload is read from the start.
            output.seek(0)

            # Bug fix: the original declared "WITH CSV HEADER" but never
            # wrote a header row, so COPY silently discarded the FIRST
            # data record of every batch. No header is written, so no
            # HEADER option here.
            # NOTE(review): passing the CSV text as a positional argument
            # to execute() may not actually feed COPY's STDIN on all
            # drivers (e.g. asyncpg) — verify against the driver in use.
            raw_conn = await session.connection()
            await raw_conn.execute(
                text(f"""
                COPY {self.table.name} ({','.join(columns)})
                FROM STDIN WITH CSV
                """),
                output.getvalue()
            )

            return {
                'affected_rows': len(records),
                'processed_count': len(records)
            }

        except Exception as e:
            logger.error(f"COPY插入失败: {e}")
            # Bare raise preserves the original traceback (vs `raise e`).
            raise
    
    def _create_chunks(
        self,
        data: List[Any],
        chunk_size: int
    ) -> List[List[Any]]:
        """创建数据分块"""
        chunks = []
        for i in range(0, len(data), chunk_size):
            chunks.append(data[i:i + chunk_size])
        return chunks
    
    async def _invalidate_cache(self):
        """Drop all cached entries related to this optimizer's table.

        Clears both the table-level keys ("<table>:*") and the aggregate
        keys ("agg:<table>:*"). Failures are logged and swallowed — cache
        invalidation is best-effort and must not break the write path.
        """
        try:
            # First the table-level entries, then the aggregation entries.
            for prefix in ("", "agg:"):
                await self.cache.delete_pattern(
                    f"{prefix}{self.table.name}:*"
                )

            logger.debug(f"已清理表 {self.table.name} 的相关缓存")

        except Exception as e:
            logger.warning(f"缓存清理失败: {e}")
    
    # Performance monitoring and analysis methods
    async def get_performance_metrics(self) -> Dict[str, Any]:
        """Summarize recorded batch-operation metrics.

        Returns:
            Empty dict when no metrics have been recorded; otherwise a dict
            with operation counts, success rate, average throughput/duration
            (averaged over operations that actually reported a value), the
            number of active operations and the last 10 raw metric entries.
        """
        if not self._metrics_history:
            return {}

        total_operations = len(self._metrics_history)
        # An operation counts as successful when it reported no errors.
        successful_operations = sum(
            1 for m in self._metrics_history
            if m.error_rate == 0 or m.error_rate is None
        )

        # Bug fix: the original divided the sum of the non-None samples by
        # the TOTAL operation count, silently deflating both averages
        # whenever some operations lacked a throughput/duration value.
        throughputs = [
            m.throughput for m in self._metrics_history
            if m.throughput is not None
        ]
        durations = [
            m.duration for m in self._metrics_history
            if m.duration is not None
        ]
        avg_throughput = sum(throughputs) / len(throughputs) if throughputs else 0
        avg_duration = sum(durations) / len(durations) if durations else 0

        return {
            'total_operations': total_operations,
            'successful_operations': successful_operations,
            'success_rate': successful_operations / total_operations if total_operations > 0 else 0,
            'average_throughput': avg_throughput,
            'average_duration': avg_duration,
            'active_operations': len(self._active_operations),
            'recent_metrics': self._metrics_history[-10:] if self._metrics_history else []
        }
    
    async def optimize_batch_config(
        self,
        sample_data: List[Dict[str, Any]],
        target_throughput: float = 1000.0
    ) -> BatchConfig:
        """Auto-tune batch configuration by benchmarking candidate configs.

        Runs a test insert with each candidate configuration against a
        slice of `sample_data` and keeps the one with the best measured
        throughput.

        Args:
            sample_data: Representative records used for the test inserts
                (they are actually written with on_conflict="ignore").
            target_throughput: Desired throughput in records/sec. Currently
                informational only — candidates are ranked by measured
                throughput, not compared against this target.

        Returns:
            The best-performing BatchConfig, or a default BatchConfig if
            every candidate failed.
        """
        logger.info("开始批量配置优化...")

        # Candidate configurations to benchmark.
        test_configs = [
            BatchConfig(batch_size=100, chunk_size=50),
            BatchConfig(batch_size=500, chunk_size=100),
            BatchConfig(batch_size=1000, chunk_size=200),
            BatchConfig(batch_size=2000, chunk_size=500),
        ]

        best_config = None
        best_throughput = 0

        # Bug fix: the loop variable was named `config`, shadowing the
        # module-level `config` import; renamed to `candidate`.
        for candidate in test_configs:
            original_config = self.config
            try:
                # Limit the test set to at most one batch of data.
                test_data = sample_data[:min(len(sample_data), candidate.batch_size)]

                # Temporarily swap in the candidate configuration.
                self.config = candidate

                result = await self.batch_insert(
                    test_data,
                    strategy=BatchStrategy.CHUNKED,
                    on_conflict="ignore"
                )

                if result.success and result.metrics.throughput:
                    if result.metrics.throughput > best_throughput:
                        best_throughput = result.metrics.throughput
                        best_config = candidate

                logger.info(
                    f"配置测试 - batch_size: {candidate.batch_size}, "
                    f"chunk_size: {candidate.chunk_size}, "
                    f"throughput: {result.metrics.throughput:.2f}"
                )

            except Exception as e:
                logger.warning(f"配置测试失败: {e}")
                continue
            finally:
                # Bug fix: restore unconditionally — the original only
                # restored on the success path, so a failed test left the
                # candidate config active for all subsequent operations.
                self.config = original_config

        if best_config:
            logger.info(f"找到最优配置，吞吐量: {best_throughput:.2f} records/sec")
            return best_config
        else:
            logger.warning("未找到最优配置，使用默认配置")
            return BatchConfig()
    
    async def health_check(self) -> Dict[str, Any]:
        """Report health of the database, the cache and optimizer internals.

        Each dependency is probed independently; a failing probe yields an
        "unhealthy: <error>" status instead of raising.
        """
        async def _probe(factory) -> str:
            # Run one connectivity probe, mapping any failure to a status
            # string so one broken dependency never masks the others.
            try:
                await factory()
                return "healthy"
            except Exception as e:
                return f"unhealthy: {e}"

        async def _check_db():
            async with self.session_factory() as session:
                await session.execute(text("SELECT 1"))

        db_status = await _probe(_check_db)
        cache_status = await _probe(lambda: self.cache.ping())

        return {
            'database': db_status,
            'cache': cache_status,
            'active_operations': len(self._active_operations),
            'metrics_history_size': len(self._metrics_history),
            'config': {
                'batch_size': self.config.batch_size,
                'chunk_size': self.config.chunk_size,
                'max_workers': self.config.max_workers
            }
        }


# Global batch-optimizer manager
class BatchOptimizerManager:
    """Registry of per-table BatchOptimizer instances.

    Optimizers are created lazily on first request and cached by table
    name. Note that a `config` passed to `get_optimizer` only takes effect
    when the optimizer is first created; later calls return the cached
    instance unchanged.
    """

    def __init__(self):
        # table name -> optimizer instance
        self._optimizers: Dict[str, BatchOptimizer] = {}
        # Default config applied to optimizers created without an explicit one.
        self._global_config = BatchConfig()

    def get_optimizer(
        self,
        model_class: type[ModelType],
        config: Optional[BatchConfig] = None
    ) -> BatchOptimizer[ModelType]:
        """Return the cached optimizer for `model_class`, creating it if needed."""
        table_name = model_class.__table__.name

        optimizer = self._optimizers.get(table_name)
        if optimizer is None:
            optimizer = BatchOptimizer(
                model_class=model_class,
                config=config or self._global_config
            )
            self._optimizers[table_name] = optimizer

        return optimizer

    def set_global_config(self, config: BatchConfig):
        """Replace the default config used for newly created optimizers."""
        self._global_config = config

    async def get_global_metrics(self) -> Dict[str, Any]:
        """Collect performance metrics from every registered optimizer."""
        return {
            name: await optimizer.get_performance_metrics()
            for name, optimizer in self._optimizers.items()
        }

    async def health_check_all(self) -> Dict[str, Any]:
        """Run a health check on every registered optimizer."""
        return {
            name: await optimizer.health_check()
            for name, optimizer in self._optimizers.items()
        }


# Module-level singleton: shared registry of batch optimizers for the process.
batch_optimizer_manager = BatchOptimizerManager()


# Convenience functions (module-level wrappers over the global manager)
async def batch_insert(
    model_class: type[ModelType],
    records: List[Dict[str, Any]],
    strategy: BatchStrategy = BatchStrategy.CHUNKED,
    on_conflict: str = "ignore",
    config: Optional[BatchConfig] = None
) -> BatchResult:
    """Module-level shortcut for BatchOptimizer.batch_insert.

    Resolves the optimizer for `model_class` through the global manager and
    delegates the insert to it.
    """
    return await batch_optimizer_manager.get_optimizer(
        model_class, config
    ).batch_insert(records, strategy, on_conflict)


async def batch_update(
    model_class: type[ModelType],
    updates: List[Dict[str, Any]],
    where_columns: List[str],
    strategy: BatchStrategy = BatchStrategy.CHUNKED,
    config: Optional[BatchConfig] = None
) -> BatchResult:
    """Module-level shortcut for BatchOptimizer.batch_update.

    Resolves the optimizer for `model_class` through the global manager and
    delegates the update to it.
    """
    return await batch_optimizer_manager.get_optimizer(
        model_class, config
    ).batch_update(updates, where_columns, strategy)


async def batch_upsert(
    model_class: type[ModelType],
    records: List[Dict[str, Any]],
    conflict_columns: List[str],
    update_columns: Optional[List[str]] = None,
    strategy: BatchStrategy = BatchStrategy.CHUNKED,
    config: Optional[BatchConfig] = None
) -> BatchResult:
    """Module-level shortcut for BatchOptimizer.batch_upsert.

    Resolves the optimizer for `model_class` through the global manager and
    delegates the upsert to it.
    """
    optimizer = batch_optimizer_manager.get_optimizer(model_class, config)
    return await optimizer.batch_upsert(
        records,
        conflict_columns,
        update_columns,
        strategy,
    )


async def batch_delete(
    model_class: type[ModelType],
    conditions: List[Dict[str, Any]],
    strategy: BatchStrategy = BatchStrategy.CHUNKED,
    config: Optional[BatchConfig] = None
) -> BatchResult:
    """Module-level shortcut for BatchOptimizer.batch_delete.

    Resolves the optimizer for `model_class` through the global manager and
    delegates the delete to it.
    """
    return await batch_optimizer_manager.get_optimizer(
        model_class, config
    ).batch_delete(conditions, strategy)