"""
分片任务服务
实现类似xxl-job-admin的分片集群功能
支持多Pod并行处理，充分利用集群资源
"""
import logging
import asyncio
import math
from typing import Dict, Any, Optional, List, Callable
from datetime import datetime
from uuid import UUID

from app.services.distributed_lock import DistributedLock
from app.database.base import get_db
from app.services.scheduled_job import ScheduledJobService
from app.schemas.scheduled_job import ExecutionStatus, TriggerType

logger = logging.getLogger(__name__)


class ShardingStrategy:
    """Supported sharding strategies (plain string constants, not an Enum,
    so values can be passed straight through job parameter dicts)."""

    HASH = "hash"                 # modulo-based assignment: id % shards == index
    RANGE = "range"               # contiguous index ranges per shard
    ROUND_ROBIN = "round_robin"   # interleaved: start at index, step by shard count
    CUSTOM = "custom"             # strategy interpreted by the handler itself


class ShardTaskService:
    """Sharded task execution service (xxl-job-admin style).

    Each cluster node competes for one shard slot of a job via a
    distributed lock, processes only its slice of the work through a
    registered handler, then releases the slot.
    """

    def __init__(self):
        self.job_service = ScheduledJobService()
        # handler name -> callable (sync or async) invoked with shard params
        self.shard_handlers: Dict[str, Callable] = {}

    def register_shard_handler(self, handler_name: str, handler_func: Callable):
        """Register a shard task handler under the given name.

        Re-registering the same name silently replaces the previous handler.
        """
        self.shard_handlers[handler_name] = handler_func
        logger.info(f"注册分片任务处理器: {handler_name}")

    async def execute_shard_task(
        self,
        job_id: UUID,
        handler_name: str,
        params: Dict[str, Any],
        log_id: UUID,
        max_shards: int = 10
    ) -> bool:
        """Try to acquire one shard of the job and execute it on this node.

        Args:
            job_id: Job identifier.
            handler_name: Name of a previously registered shard handler.
            params: Job parameters; may carry 'shard_count' / 'shard_strategy'
                plus strategy-specific keys (e.g. 'total_items').
            log_id: Execution-log row updated with progress and result.
            max_shards: Fallback shard count when params omit 'shard_count'.

        Returns:
            bool: True if a shard was acquired and executed successfully;
            False if every shard was already held or execution failed.
        """
        # get_db() is a generator-style dependency; keep a handle to it so
        # its cleanup (session close) runs deterministically in `finally`
        # instead of whenever the GC collects the abandoned generator.
        db_gen = get_db()
        db = next(db_gen)
        try:
            distributed_lock = DistributedLock(db)

            # Sharding configuration carried in the job parameters.
            shard_count = params.get('shard_count', max_shards)
            shard_strategy = params.get('shard_strategy', ShardingStrategy.HASH)

            # Compete for a free shard slot.
            shard_index = distributed_lock.acquire_shard_lock(
                job_id=str(job_id),
                total_shards=shard_count,
                ttl_seconds=3600
            )

            if shard_index is None:
                # Every shard is held by another node -> nothing to do here.
                logger.info(f"任务 {job_id} 所有分片都被占用，当前节点跳过执行")
                self.job_service.update_job_log(db, log_id, {
                    "start_time": datetime.now(),
                    "end_time": datetime.now(),
                    "status": "SKIPPED",
                    "result_code": 0,
                    "result_msg": "所有分片都被其他节点占用，跳过执行",
                    "executor_sharding_param": f"total={shard_count},current=-1"
                })
                return False

            try:
                # Mark the log row as running and record the shard assignment.
                self.job_service.update_job_log(db, log_id, {
                    "start_time": datetime.now(),
                    "status": ExecutionStatus.RUNNING,
                    "executor_sharding_param": f"total={shard_count},current={shard_index}"
                })

                logger.info(f"节点开始执行任务 {job_id} 的分片 {shard_index}/{shard_count}")

                shard_params = self._prepare_shard_params(
                    params, shard_index, shard_count, shard_strategy
                )

                handler = self.shard_handlers.get(handler_name)
                if not handler:
                    # ValueError (an Exception subclass) so the failure path
                    # below still records FAILED in the execution log.
                    raise ValueError(f"未找到分片处理器: {handler_name}")

                # Handlers may be either coroutines or plain callables.
                if asyncio.iscoroutinefunction(handler):
                    result = await handler(**shard_params)
                else:
                    result = handler(**shard_params)

                self.job_service.complete_job_execution(
                    db, log_id, ExecutionStatus.SUCCESS, 0, str(result)
                )

                logger.info(f"任务 {job_id} 分片 {shard_index} 执行成功")
                return True

            except Exception as e:
                error_msg = str(e)

                self.job_service.complete_job_execution(
                    db, log_id, ExecutionStatus.FAILED, -1, error_msg
                )

                logger.error(f"任务 {job_id} 分片 {shard_index} 执行失败: {error_msg}")
                return False

            finally:
                # Always free the shard slot for the next trigger, even on failure.
                distributed_lock.release_shard_lock(str(job_id), shard_index)
        finally:
            # Run the dependency generator's cleanup (closes the DB session).
            db_gen.close()

    def _prepare_shard_params(
        self,
        original_params: Dict[str, Any],
        shard_index: int,
        shard_count: int,
        strategy: str
    ) -> Dict[str, Any]:
        """Build the parameter dict passed to the shard handler.

        Args:
            original_params: Original job parameters (not mutated; copied).
            shard_index: Index of the shard this node acquired.
            shard_count: Total number of shards.
            strategy: One of the ShardingStrategy constants.

        Returns:
            Dict: Copy of the original params enriched with shard metadata
            and strategy-specific keys (range bounds / modulo values / stride).
        """
        shard_params = original_params.copy()

        # Base shard metadata every handler receives.
        shard_params.update({
            'shard_index': shard_index,
            'shard_count': shard_count,
            'shard_strategy': strategy
        })

        if strategy == ShardingStrategy.RANGE:
            # Range sharding: split [0, total_items) into near-equal slices.
            total_items = original_params.get('total_items', 0)
            if total_items > 0:
                items_per_shard = math.ceil(total_items / shard_count)
                start_index = shard_index * items_per_shard
                # Clamp the last slice so it never overruns total_items.
                end_index = min((shard_index + 1) * items_per_shard, total_items)

                shard_params.update({
                    'shard_start': start_index,
                    'shard_end': end_index,
                    'shard_size': end_index - start_index
                })

        elif strategy == ShardingStrategy.HASH:
            # Hash sharding: handler keeps items where id % base == value.
            shard_params.update({
                'shard_mod_base': shard_count,
                'shard_mod_value': shard_index
            })

        elif strategy == ShardingStrategy.ROUND_ROBIN:
            # Round-robin sharding: start at offset, advance by step.
            shard_params.update({
                'shard_offset': shard_index,
                'shard_step': shard_count
            })

        return shard_params

    def get_shard_status(self, job_id: UUID) -> Dict[str, Any]:
        """Return the current shard execution status for a job.

        Args:
            job_id: Job identifier.

        Returns:
            Dict: Shard counts, the distinct active node list, the raw shard
            records, and 'running'/'idle' depending on whether any shard
            lock is currently held.
        """
        # Same generator-dependency pattern as execute_shard_task: close it
        # explicitly so the DB session is not left to the garbage collector.
        db_gen = get_db()
        db = next(db_gen)
        try:
            distributed_lock = DistributedLock(db)

            active_shards = distributed_lock.get_active_shards(str(job_id))

            total_shards = len(active_shards)
            # NOTE: set() makes the node order unspecified, as before.
            nodes = list(set(shard['node_id'] for shard in active_shards))

            return {
                'job_id': str(job_id),
                'total_shards': total_shards,
                'active_nodes': len(nodes),
                'node_list': nodes,
                'shards': active_shards,
                'status': 'running' if total_shards > 0 else 'idle'
            }
        finally:
            db_gen.close()

    def estimate_optimal_shards(
        self,
        data_size: int,
        available_nodes: int,
        processing_time_per_item: float = 0.1
    ) -> int:
        """Estimate a sensible shard count for a job.

        Args:
            data_size: Total number of items to process.
            available_nodes: Number of nodes available in the cluster.
            processing_time_per_item: Seconds to process one item; values
                <= 0 disable the time-based constraint.

        Returns:
            int: Suggested shard count (minimum of the three constraints below).
        """
        # Constraint 1: each shard should process at least 100 items.
        min_items_per_shard = 100
        max_shards_by_data = max(1, data_size // min_items_per_shard)

        # Constraint 2: at most 2 shards per available node.
        max_shards_by_nodes = available_nodes * 2

        # Constraint 3: a single shard should finish within 30 minutes.
        max_processing_time = 30 * 60  # seconds
        if processing_time_per_item > 0:
            # max(1, ...) guards the degenerate case where one item alone
            # exceeds the time budget (would otherwise divide by zero below).
            max_items_per_shard = max(1, int(max_processing_time / processing_time_per_item))
            max_shards_by_time = max(1, math.ceil(data_size / max_items_per_shard))
        else:
            # Non-positive per-item time: time imposes no extra constraint.
            max_shards_by_time = max_shards_by_data

        optimal_shards = min(
            max_shards_by_data,
            max_shards_by_nodes,
            max_shards_by_time
        )

        logger.info(f"分片数估算: 数据={max_shards_by_data}, 节点={max_shards_by_nodes}, 时间={max_shards_by_time}, 最优={optimal_shards}")

        return optimal_shards


# Global singleton; built-in handlers below register against this instance.
shard_task_service = ShardTaskService()


# ==================== 内置分片任务处理器 ====================

async def shard_data_processing_handler(**kwargs):
    """Demo handler: process one contiguous data slice (range sharding).

    Reads shard_index / shard_count / shard_start / shard_end from kwargs,
    simulates per-item work, and returns a summary dict.
    """
    idx = kwargs.get('shard_index', 0)
    total = kwargs.get('shard_count', 1)
    start = kwargs.get('shard_start', 0)
    end = kwargs.get('shard_end', 100)

    logger.info(f"开始处理分片 {idx}/{total}, 数据范围: {start}-{end}")

    done = 0
    for _ in range(start, end):
        # Simulated per-item processing time.
        await asyncio.sleep(0.01)
        done += 1

        # Progress report every 100 items.
        if done % 100 == 0:
            logger.info(f"分片 {idx} 已处理 {done} 个数据项")

    outcome = {
        'shard_index': idx,
        'processed_count': done,
        'data_range': f'{start}-{end}',
        'status': 'completed'
    }

    logger.info(f"分片 {idx} 处理完成，共处理 {done} 个数据项")
    return outcome


def shard_hash_processing_handler(**kwargs):
    """Demo handler: select the items belonging to this shard by modulo.

    An item belongs to this shard when item_id % mod_base == mod_value.
    Returns a summary dict including the first 10 matched item ids.
    """
    idx = kwargs.get('shard_index', 0)
    total = kwargs.get('shard_count', 1)
    base = kwargs.get('shard_mod_base', total)
    value = kwargs.get('shard_mod_value', idx)
    item_total = kwargs.get('total_items', 1000)

    # Keep exactly the ids whose remainder matches this shard's slot.
    mine = [item_id for item_id in range(item_total) if item_id % base == value]

    outcome = {
        'shard_index': idx,
        'processed_count': len(mine),
        'hash_strategy': f'id % {base} == {value}',
        'sample_items': mine[:10],  # first 10 processed items
        'status': 'completed'
    }

    logger.info(f"哈希分片 {idx} 处理完成，共处理 {len(mine)} 个数据项")
    return outcome


def shard_device_sync_handler(**kwargs):
    """Demo handler: sync the slice of devices assigned to this shard.

    Splits device_list evenly across shards; the final shard absorbs the
    remainder. Returns a summary dict with per-device sync records.
    """
    idx = kwargs.get('shard_index', 0)
    total = kwargs.get('shard_count', 1)
    devices = kwargs.get('device_list', [])

    # Even split; the last shard takes everything left over.
    per_shard = len(devices) // total
    lo = idx * per_shard
    hi = len(devices) if idx == total - 1 else (idx + 1) * per_shard

    # Simulated per-device sync.
    synced = [
        {
            'device_id': device,
            'sync_time': datetime.now().isoformat(),
            'status': 'success'
        }
        for device in devices[lo:hi]
    ]

    outcome = {
        'shard_index': idx,
        'device_range': f'{lo}-{hi}',
        'synced_count': len(synced),
        'synced_devices': synced,
        'status': 'completed'
    }

    logger.info(f"设备同步分片 {idx} 完成，同步了 {len(synced)} 个设备")
    return outcome


# 注册内置分片处理器
def register_builtin_shard_handlers():
    """Register every built-in shard handler with the global service."""
    builtin = {
        "shard_data_processing": shard_data_processing_handler,
        "shard_hash_processing": shard_hash_processing_handler,
        "shard_device_sync": shard_device_sync_handler,
    }
    # dict preserves insertion order, so registration order is unchanged.
    for handler_name, handler_func in builtin.items():
        shard_task_service.register_shard_handler(handler_name, handler_func)