"""
分片集群Demo服务
展示如何在实际业务场景中使用xxl-job-admin风格的分片集群功能
"""
import asyncio
import logging
import random
from datetime import datetime, timedelta
from typing import Dict, Any, List

from sqlalchemy.orm import Session

from app.database.base import get_db
from app.services.shard_task import shard_task_service, ShardingStrategy
from app.models.user import User
from app.models.device import Device
from app.models.maintenance_record import MaintenanceRecord

logger = logging.getLogger(__name__)


class ShardDemoService:
    """Demo service for the shard-cluster feature.

    Demonstrates how xxl-job-admin style sharding is applied to real business
    scenarios.  Constructing the service registers four demo shard handlers
    (hash, range, round-robin and batched-range strategies) with the global
    ``shard_task_service`` so they can be dispatched by name.
    """

    def __init__(self):
        # Reserved for per-handler bookkeeping; currently unused.
        self.demo_handlers = {}
        self._register_demo_handlers()

    def _register_demo_handlers(self):
        """Register every demo shard handler with the shard task service."""
        # User analytics: hash sharding (user_id % mod_base == mod_value).
        shard_task_service.register_shard_handler(
            "demo_user_analytics", 
            self.user_analytics_shard_handler
        )
        
        # Device data sync: range sharding (offset/limit windows).
        shard_task_service.register_shard_handler(
            "demo_device_sync", 
            self.device_sync_shard_handler
        )
        
        # Maintenance-record cleanup: round-robin sharding.
        shard_task_service.register_shard_handler(
            "demo_maintenance_cleanup", 
            self.maintenance_cleanup_shard_handler
        )
        
        # Big-data processing: range sharding with internal batching.
        shard_task_service.register_shard_handler(
            "demo_big_data_processing", 
            self.big_data_processing_shard_handler
        )
        
        logger.info("✅ 所有Demo分片处理器注册完成")

    async def user_analytics_shard_handler(self, **kwargs) -> Dict[str, Any]:
        """Hash-sharded user analytics handler.

        Each shard processes only the users whose ``id % mod_base == mod_value``,
        so shards work on disjoint subsets of the user table.

        Keyword Args:
            shard_index: Index of this shard (default 0).
            shard_count: Total number of shards (default 1).
            shard_mod_base: Hash modulus (defaults to ``shard_count``).
            shard_mod_value: Expected remainder (defaults to ``shard_index``).
            analysis_date: Date label for the analysis run (defaults to today).

        Returns:
            Dict with per-shard statistics and a small sample of processed users.

        Raises:
            Exception: Re-raised after logging if the analysis fails.
        """
        shard_index = kwargs.get('shard_index', 0)
        shard_count = kwargs.get('shard_count', 1)

        # Hash-sharding parameters supplied by the scheduler.
        mod_base = kwargs.get('shard_mod_base', shard_count)
        mod_value = kwargs.get('shard_mod_value', shard_index)

        analysis_date = kwargs.get('analysis_date', datetime.now().strftime('%Y-%m-%d'))

        logger.info(f"🔍 开始执行用户分析分片 {shard_index}/{shard_count}")
        logger.info(f"📅 分析日期: {analysis_date}")
        logger.info(f"🔢 哈希策略: user_id % {mod_base} == {mod_value}")

        db = next(get_db())
        try:
            # Apply the hash shard at query level.  `type: ignore` because
            # SQLAlchemy Column operators confuse static type checkers.
            users = db.query(User).filter(
                User.id % mod_base == mod_value  # type: ignore
            ).all()

            processed_users = []
            analytics_results = {
                'total_users': 0,
                'active_users': 0,
                'inactive_users': 0,
                'device_count_stats': {}
            }

            for user in users:
                # Explicit conversions from SQLAlchemy Column attributes.
                user_id = str(user.id)  # type: ignore
                username = str(user.username)  # type: ignore
                is_active = bool(user.is_active)  # type: ignore

                # Count devices created by this user.
                device_count = db.query(Device).filter(
                    Device.created_by == user.id  # type: ignore
                ).count()

                analytics_results['total_users'] += 1
                if is_active:
                    analytics_results['active_users'] += 1
                else:
                    analytics_results['inactive_users'] += 1

                # Bucket users by how many devices they own.
                device_range = self._get_device_count_range(device_count)
                analytics_results['device_count_stats'][device_range] = \
                    analytics_results['device_count_stats'].get(device_range, 0) + 1

                processed_users.append({
                    'user_id': user_id,
                    'username': username,
                    'is_active': is_active,
                    'device_count': device_count,
                    'analysis_time': datetime.now().isoformat()
                })

                # Simulated per-user processing latency.
                await asyncio.sleep(0.01)

            result = {
                'shard_index': shard_index,
                'shard_count': shard_count,
                'analysis_date': analysis_date,
                'hash_strategy': f'user_id % {mod_base} == {mod_value}',
                'processed_count': len(processed_users),
                'analytics_summary': analytics_results,
                'sample_users': processed_users[:5],  # first 5 users as a sample
                'status': 'completed',
                'execution_time': datetime.now().isoformat()
            }

            logger.info(f"✅ 用户分析分片 {shard_index} 完成，处理了 {len(processed_users)} 个用户")
            return result

        except Exception as e:
            logger.error(f"❌ 用户分析分片 {shard_index} 执行失败: {str(e)}")
            raise
        finally:
            # FIX: the session from get_db() was previously never closed,
            # leaking a connection on every invocation.
            db.close()

    async def device_sync_shard_handler(self, **kwargs) -> Dict[str, Any]:
        """Range-sharded device data synchronization handler.

        Each shard synchronizes the devices in the half-open row window
        [shard_start, shard_end) of the device table.

        Keyword Args:
            shard_index: Index of this shard (default 0).
            shard_count: Total number of shards (default 1).
            shard_start: First row offset of this shard's window (default 0).
            shard_end: End row offset, exclusive (default 100).
            sync_type: Sync mode label passed through to the sync call
                (default 'full').

        Returns:
            Dict with sync statistics and a sample of synced devices.

        Raises:
            Exception: Re-raised after logging if the shard as a whole fails;
                per-device sync errors are counted, not raised.
        """
        shard_index = kwargs.get('shard_index', 0)
        shard_count = kwargs.get('shard_count', 1)

        # Range-sharding parameters supplied by the scheduler.
        shard_start = kwargs.get('shard_start', 0)
        shard_end = kwargs.get('shard_end', 100)
        sync_type = kwargs.get('sync_type', 'full')

        logger.info(f"🔄 开始执行设备同步分片 {shard_index}/{shard_count}")
        logger.info(f"📊 数据范围: {shard_start} - {shard_end}")
        logger.info(f"🔧 同步类型: {sync_type}")

        db = next(get_db())
        try:
            # FIX: order by primary key so the offset/limit window is
            # deterministic; without ORDER BY the database may return rows in
            # any order and shards could overlap or miss devices.
            devices = (
                db.query(Device)
                .order_by(Device.id)
                .offset(shard_start)
                .limit(shard_end - shard_start)
                .all()
            )

            synced_devices = []
            sync_stats = {
                'total_devices': 0,
                'success_count': 0,
                'failed_count': 0,
                'skipped_count': 0
            }

            for device in devices:
                device_id = str(device.id)  # type: ignore
                device_name = str(device.name)  # type: ignore
                device_status = str(device.status)  # type: ignore

                try:
                    # Simulated per-device synchronization.
                    sync_result = await self._simulate_device_sync(device_id, sync_type)

                    synced_devices.append({
                        'device_id': device_id,
                        'device_name': device_name,
                        'device_status': device_status,
                        'sync_result': sync_result,
                        'sync_time': datetime.now().isoformat()
                    })

                    sync_stats['total_devices'] += 1
                    if sync_result['status'] == 'success':
                        sync_stats['success_count'] += 1
                    elif sync_result['status'] == 'failed':
                        sync_stats['failed_count'] += 1
                    else:
                        sync_stats['skipped_count'] += 1

                    # Simulated sync latency.
                    await asyncio.sleep(0.02)

                except Exception as sync_error:
                    # A single device failure must not abort the shard.
                    sync_stats['total_devices'] += 1
                    sync_stats['failed_count'] += 1
                    logger.warning(f"设备 {device_id} 同步失败: {str(sync_error)}")

            result = {
                'shard_index': shard_index,
                'shard_count': shard_count,
                'range_start': shard_start,
                'range_end': shard_end,
                'sync_type': sync_type,
                'sync_statistics': sync_stats,
                'synced_devices': synced_devices[:10],  # first 10 devices as a sample
                'status': 'completed',
                'execution_time': datetime.now().isoformat()
            }

            logger.info(f"✅ 设备同步分片 {shard_index} 完成，同步了 {sync_stats['total_devices']} 个设备")
            return result

        except Exception as e:
            logger.error(f"❌ 设备同步分片 {shard_index} 执行失败: {str(e)}")
            raise
        finally:
            # FIX: previously the session was leaked.
            db.close()

    def maintenance_cleanup_shard_handler(self, **kwargs) -> Dict[str, Any]:
        """Round-robin-sharded cleanup of expired maintenance records.

        NOTE(review): unlike the other demo handlers this one is synchronous;
        the shard task service appears to accept both — confirm before
        converting to async.

        Each shard deletes every ``shard_step``-th expired record starting at
        index ``shard_offset``, so shards partition the expired set evenly.

        Keyword Args:
            shard_index: Index of this shard (default 0).
            shard_count: Total number of shards (default 1).
            shard_offset: Round-robin offset (defaults to ``shard_index``).
            shard_step: Round-robin stride (defaults to ``shard_count``).
            days_to_keep: Records newer than this many days are kept
                (default 90).

        Returns:
            Dict with cleanup statistics and a sample of deleted records.

        Raises:
            Exception: Re-raised after rollback and logging on failure.
        """
        shard_index = kwargs.get('shard_index', 0)
        shard_count = kwargs.get('shard_count', 1)

        # Round-robin sharding parameters supplied by the scheduler.
        shard_offset = kwargs.get('shard_offset', shard_index)
        shard_step = kwargs.get('shard_step', shard_count)

        # Cleanup threshold: anything created before this date is expired.
        days_to_keep = kwargs.get('days_to_keep', 90)
        cleanup_date = datetime.now() - timedelta(days=days_to_keep)

        logger.info(f"🧹 开始执行维护记录清理分片 {shard_index}/{shard_count}")
        logger.info(f"📅 清理日期阈值: {cleanup_date.strftime('%Y-%m-%d')}")
        logger.info(f"🔄 轮询策略: offset={shard_offset}, step={shard_step}")

        # FIX: acquire the session before the try block — previously `db` was
        # bound inside `try`, so the except clause's rollback raised NameError
        # if session creation itself failed.
        db = next(get_db())
        try:
            # All expired records; the round-robin filter is applied in Python.
            all_expired_records = db.query(MaintenanceRecord).filter(
                MaintenanceRecord.created_at < cleanup_date
            ).all()

            # Round-robin shard allocation: every shard_step-th record.
            shard_records = [
                record for i, record in enumerate(all_expired_records) 
                if i % shard_step == shard_offset
            ]

            cleanup_stats = {
                'total_found': len(all_expired_records),
                'shard_allocated': len(shard_records),
                'deleted_count': 0,
                'error_count': 0
            }

            deleted_records = []

            for record in shard_records:
                try:
                    record_id = str(record.id)  # type: ignore
                    device_id = str(record.device_id)  # type: ignore
                    maintenance_type = str(record.maintenance_type)  # type: ignore
                    created_at = record.created_at.isoformat()  # type: ignore

                    # Keep an audit trail of what was deleted.
                    deleted_records.append({
                        'record_id': record_id,
                        'device_id': device_id,
                        'maintenance_type': maintenance_type,
                        'created_at': created_at,
                        'deleted_at': datetime.now().isoformat()
                    })

                    db.delete(record)
                    cleanup_stats['deleted_count'] += 1

                except Exception as delete_error:
                    # One bad record must not abort the whole shard.
                    cleanup_stats['error_count'] += 1
                    logger.warning(f"删除维护记录失败: {str(delete_error)}")

            # Commit all deletions in one transaction.
            db.commit()

            result = {
                'shard_index': shard_index,
                'shard_count': shard_count,
                'cleanup_date_threshold': cleanup_date.isoformat(),
                'round_robin_strategy': f'offset={shard_offset}, step={shard_step}',
                'cleanup_statistics': cleanup_stats,
                'deleted_records_sample': deleted_records[:5],  # first 5 deletions as a sample
                'status': 'completed',
                'execution_time': datetime.now().isoformat()
            }

            logger.info(f"✅ 维护记录清理分片 {shard_index} 完成，删除了 {cleanup_stats['deleted_count']} 条记录")
            return result

        except Exception as e:
            db.rollback()
            logger.error(f"❌ 维护记录清理分片 {shard_index} 执行失败: {str(e)}")
            raise
        finally:
            # FIX: previously the session was leaked.
            db.close()

    async def big_data_processing_shard_handler(self, **kwargs) -> Dict[str, Any]:
        """Range-sharded big-data processing demo.

        Splits the shard's [shard_start, shard_end) range into batches of
        ``batch_size`` and processes each batch via ``_process_data_batch``.

        Keyword Args:
            shard_index: Index of this shard (default 0).
            shard_count: Total number of shards (default 1).
            shard_start: Start of this shard's data range (default 0).
            shard_end: End of the range, exclusive (default 1000).
            process_type: Processing mode label (default 'analytics').
            batch_size: Items per batch (default 100).

        Returns:
            Dict with batch statistics, timing summary and sample batches.

        Raises:
            Exception: Re-raised after logging if processing fails.
        """
        shard_index = kwargs.get('shard_index', 0)
        shard_count = kwargs.get('shard_count', 1)
        shard_start = kwargs.get('shard_start', 0)
        shard_end = kwargs.get('shard_end', 1000)

        process_type = kwargs.get('process_type', 'analytics')
        batch_size = kwargs.get('batch_size', 100)

        logger.info(f"📊 开始执行大数据处理分片 {shard_index}/{shard_count}")
        logger.info(f"🔢 数据范围: {shard_start} - {shard_end}")
        logger.info(f"📦 批处理大小: {batch_size}")
        logger.info(f"⚙️ 处理类型: {process_type}")

        try:
            processed_batches = []
            total_processed = 0
            batch_count = 0

            # Process the shard range batch by batch.
            for batch_start in range(shard_start, shard_end, batch_size):
                batch_end = min(batch_start + batch_size, shard_end)
                batch_count += 1

                batch_result = await self._process_data_batch(
                    batch_start, batch_end, process_type, batch_count
                )

                processed_batches.append(batch_result)
                total_processed += batch_result['processed_count']

                # Progress log every 5 batches.  The denominator is non-zero
                # here because the loop only runs when shard_end > shard_start.
                if batch_count % 5 == 0:
                    progress = ((batch_start - shard_start) / (shard_end - shard_start)) * 100
                    logger.info(f"📈 分片 {shard_index} 处理进度: {progress:.1f}%")

                # Simulated inter-batch pause.
                await asyncio.sleep(0.05)

            # FIX: an empty range (shard_start >= shard_end) previously raised
            # ZeroDivisionError / ValueError when summarizing zero batches.
            if processed_batches:
                batch_times = [b['processing_time'] for b in processed_batches]
                processing_summary = {
                    'avg_batch_time': sum(batch_times) / len(batch_times),
                    'max_batch_time': max(batch_times),
                    'min_batch_time': min(batch_times)
                }
            else:
                processing_summary = {
                    'avg_batch_time': 0.0,
                    'max_batch_time': 0.0,
                    'min_batch_time': 0.0
                }

            result = {
                'shard_index': shard_index,
                'shard_count': shard_count,
                'data_range': f'{shard_start}-{shard_end}',
                'process_type': process_type,
                'batch_size': batch_size,
                'total_batches': batch_count,
                'total_processed': total_processed,
                'processing_summary': processing_summary,
                'batch_details': processed_batches[:3],  # first 3 batches as a sample
                'status': 'completed',
                'execution_time': datetime.now().isoformat()
            }

            logger.info(f"✅ 大数据处理分片 {shard_index} 完成，总共处理 {total_processed} 条数据")
            return result

        except Exception as e:
            logger.error(f"❌ 大数据处理分片 {shard_index} 执行失败: {str(e)}")
            raise

    # ==================== Helper methods ====================

    def _get_device_count_range(self, count: int) -> str:
        """Return the bucket label for a device count."""
        if count == 0:
            return "0_devices"
        elif count <= 5:
            return "1-5_devices"
        elif count <= 10:
            return "6-10_devices"
        elif count <= 20:
            return "11-20_devices"
        else:
            return "20+_devices"

    async def _simulate_device_sync(self, device_id: str, sync_type: str) -> Dict[str, Any]:
        """Simulate syncing one device; succeeds ~85% of the time.

        Returns a result dict whose 'status' is 'success' or 'failed'.
        """
        # Simulated sync latency.
        await asyncio.sleep(0.01)

        # `random` is imported at module level (stdlib); non-deterministic by
        # design since this is a demo.
        success_rate = 0.85  # 85% success rate

        if random.random() < success_rate:
            return {
                'status': 'success',
                'sync_type': sync_type,
                'data_synced': f'{random.randint(10, 100)}KB',
                'sync_duration': f'{random.randint(50, 200)}ms'
            }
        else:
            return {
                'status': 'failed',
                'sync_type': sync_type,
                'error': 'Network timeout or device unreachable',
                'retry_count': random.randint(1, 3)
            }

    async def _process_data_batch(
        self, 
        batch_start: int, 
        batch_end: int, 
        process_type: str, 
        batch_num: int
    ) -> Dict[str, Any]:
        """Process one data batch [batch_start, batch_end) and time it.

        Returns a dict describing the batch, including the wall-clock
        'processing_time' in seconds used by the shard-level summary.
        """
        start_time = datetime.now()

        batch_size = batch_end - batch_start

        # Simulated processing time proportional to batch size (1ms per item).
        processing_time = batch_size * 0.001
        await asyncio.sleep(processing_time)

        end_time = datetime.now()
        duration = (end_time - start_time).total_seconds()

        return {
            'batch_number': batch_num,
            'batch_start': batch_start,
            'batch_end': batch_end,
            'processed_count': batch_size,
            'process_type': process_type,
            'processing_time': duration,
            'start_time': start_time.isoformat(),
            'end_time': end_time.isoformat()
        }

    def get_demo_info(self) -> Dict[str, Any]:
        """Return static metadata describing the available demo handlers."""
        return {
            "demo_name": "分片集群功能演示",
            "description": "展示xxl-job-admin风格分片集群在实际业务场景中的应用",
            "available_handlers": [
                {
                    "name": "demo_user_analytics",
                    "description": "用户数据分析分片处理",
                    "strategy": "hash",
                    "use_case": "用户画像分析、用户行为统计"
                },
                {
                    "name": "demo_device_sync",
                    "description": "设备数据同步分片处理",
                    "strategy": "range",
                    "use_case": "设备状态同步、数据采集"
                },
                {
                    "name": "demo_maintenance_cleanup",
                    "description": "维护记录清理分片处理",
                    "strategy": "round_robin",
                    "use_case": "历史数据清理、归档处理"
                },
                {
                    "name": "demo_big_data_processing",
                    "description": "大数据量处理分片演示",
                    "strategy": "range",
                    "use_case": "大规模数据分析、批量处理"
                }
            ],
            "features": [
                "多种分片策略演示",
                "实际业务场景模拟", 
                "完整的错误处理",
                "详细的执行日志",
                "性能监控指标"
            ]
        }


# Global demo-service singleton.  NOTE: instantiating it here registers all
# demo shard handlers with shard_task_service as an import-time side effect.
shard_demo_service = ShardDemoService()