from celery_app import celery
from datetime import datetime
import time
import uuid
import random
from functools import wraps
import redis
import json
import psutil
import logging
import socket
import subprocess
import os
import tempfile
from config import MultiNodeConfig
from ping3 import ping, verbose_ping

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Cache directory located next to this module, created eagerly at import time.
CACHE_DIR = os.path.join(os.path.dirname(__file__), 'cache')
# exist_ok avoids the check-then-create race when several workers import concurrently.
os.makedirs(CACHE_DIR, exist_ok=True)

# Lazily-initialized, module-wide Redis client (managed by get_redis_client).
_redis_client = None

def get_redis_client(role):
    """Return a module-cached Redis client connected to the current master.

    The cached client is reused while it still answers PING. Otherwise a
    short-lived bootstrap connection (host selected via ``role``) reads the
    published "master_ip" key, and the real client is (re)built against it.

    Args:
        role: "master" or "worker"; forwarded to MultiNodeConfig.get_redis_host()
            to select the bootstrap host.

    Returns:
        A connected redis.Redis client (decode_responses=True).

    Raises:
        redis.ConnectionError / redis.TimeoutError: if all retries fail.
    """
    global _redis_client
    # Fast path: reuse the cached client if it is still reachable.
    # NOTE(review): the cache ignores `role`, so the first successful call
    # pins the connection for all later callers — confirm this is intended.
    if _redis_client is not None:
        try:
            _redis_client.ping()
            return _redis_client
        except (redis.ConnectionError, redis.TimeoutError):
            _redis_client = None

    max_retries = 3
    retry_delay = 1

    for attempt in range(max_retries):
        temp_client = None
        try:
            # Bootstrap connection, used only to discover the master's IP.
            temp_client = redis.Redis(
                host=MultiNodeConfig.get_redis_host(role=role),
                port=MultiNodeConfig.REDIS_PORT,
                db=MultiNodeConfig.REDIS_DB,
                decode_responses=True,
                socket_timeout=5,
                socket_connect_timeout=5
            )

            # Fall back to the configured host when no master has published its IP.
            master_ip = temp_client.get("master_ip") or MultiNodeConfig.get_redis_host(role=role)

            _redis_client = redis.Redis(
                host=master_ip,
                port=MultiNodeConfig.REDIS_PORT,
                db=MultiNodeConfig.REDIS_DB,
                decode_responses=True,
                socket_timeout=5,
                socket_connect_timeout=5
            )

            _redis_client.ping()
            logger.info(f"Connected to Redis at {master_ip}")
            return _redis_client

        except (redis.ConnectionError, redis.TimeoutError) as e:
            if attempt == max_retries - 1:
                logger.error(f"Failed to connect to Redis after {max_retries} attempts: {str(e)}")
                raise
            logger.warning(f"Redis connection failed (attempt {attempt+1}): {str(e)}")
            time.sleep(retry_delay)
        finally:
            # Bug fix: the bootstrap client was never released, leaking one
            # connection on every (re)connect attempt.
            if temp_client is not None:
                try:
                    temp_client.close()
                except Exception:
                    pass

def with_redis_retry(max_retries=3, retry_delay=1):
    """Decorator factory: retry the wrapped call on Redis connectivity errors.

    After each failed attempt the cached module-level client is discarded so
    the next attempt re-resolves the master. The final failure is re-raised.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            global _redis_client
            attempt = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except (redis.ConnectionError, redis.TimeoutError) as exc:
                    attempt += 1
                    if attempt >= max_retries:
                        logger.error(f"Redis operation failed after {max_retries} attempts: {str(exc)}")
                        raise
                    logger.warning(f"Redis operation failed (attempt {attempt}): {str(exc)}")
                    time.sleep(retry_delay)
                    # Force get_redis_client() to rebuild the connection.
                    _redis_client = None
        return wrapper
    return decorator

def log_task_execution(task_name):
    """Decorator factory: record each task run (status, duration) to Redis.

    Entries are LPUSHed to the 'logs' list. Logging is best-effort: a Redis
    failure while writing the log entry must never mask the task's own
    result or exception (the original code let it propagate and hide them).
    """
    def decorator(f):
        def _push_log(entry):
            # Best-effort write; swallow logging failures deliberately.
            try:
                redis_conn = get_redis_client(role="master")
                redis_conn.lpush('logs', json.dumps(entry, ensure_ascii=False))
            except Exception as log_err:
                logger.warning(f"Failed to write task log for {task_name}: {log_err}")

        @wraps(f)
        def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = f(*args, **kwargs)
            except Exception as e:
                execution_time = time.time() - start_time
                _push_log({
                    "id": str(uuid.uuid4()),
                    "type": "task",
                    "action": task_name,
                    "status": "failed",
                    "error": str(e),
                    "execution_time": execution_time,
                    "timestamp": time.time(),
                    "user": "system"
                })
                logger.error(f"Task {task_name} failed: {str(e)}")
                raise
            execution_time = time.time() - start_time
            _push_log({
                "id": str(uuid.uuid4()),
                "type": "task",
                "action": task_name,
                "status": "success",
                "execution_time": execution_time,
                "timestamp": time.time(),
                "user": "system",
                "result": str(result)
            })
            logger.info(f"Task {task_name} completed successfully in {execution_time:.2f}s")
            return result
        return wrapper
    return decorator

@celery.task(bind=True)
@log_task_execution("init_templates")
@with_redis_retry(max_retries=3)
def init_templates(self):
    """Initialize/refresh VM template metadata from the Redis 'isos' hash.

    Templates are generated only for ISOs that do not already have one; the
    template's specs are chosen by keyword-matching the ISO file name.

    Returns:
        dict with "status", "message", "templates_updated" and (on success
        with new templates) the template records themselves.
    """
    self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始初始化模板'})
    redis_conn = get_redis_client(role="master")
    templates = []

    try:
        logger.info("Fetching ISO files from Redis")
        isos = {k: json.loads(v) for k, v in redis_conn.hgetall('isos').items()}
        if not isos:
            logger.warning("No ISO files found in Redis, skipping template initialization")
            self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '无 ISO 文件可初始化'})
            return {"status": "success", "message": "No ISO files available", "templates_updated": 0}

        total_isos = len(isos)
        logger.info(f"Found {total_isos} ISO files to process")
        self.update_state(state='PROGRESS', meta={'progress': 20, 'message': f'处理 {total_isos} 个 ISO 文件'})

        # Per-OS default specs; 'default' is the fallback for unrecognized ISOs.
        os_config_map = {
            'memtest': {
                'name': "MemTest86 内存测试工具",
                'os': "MemTest86",
                'cpu': 1,
                'memory': 1,
                'disk': 1,
                'description': "专业内存测试工具，用于检测内存错误"
            },
            'debian': {
                'name': "Debian 服务器模板",
                'os': "Debian",
                'cpu': 2,
                'memory': 2,
                'disk': 20,
                'description': "稳定可靠的Debian服务器系统"
            },
            'alpine': {
                'name': "Alpine Linux 轻量级模板",
                'os': "Alpine Linux",
                'cpu': 1,
                'memory': 1,
                'disk': 5,
                'description': "超轻量级Linux发行版，适合容器和嵌入式系统"
            },
            'default': {
                'name': "通用Linux服务器模板",
                'os': "Linux",
                'cpu': 2,
                'memory': 2,
                'disk': 20,
                'description': "通用Linux服务器配置"
            }
        }

        existing_templates = {k: json.loads(v) for k, v in redis_conn.hgetall('templates').items()}
        # Bug fix: a stored template without an 'iso_id' key used to raise
        # KeyError and abort the whole task; tolerate such records instead.
        existing_iso_ids = {t.get('iso_id') for t in existing_templates.values() if t.get('iso_id')}

        for index, (iso_id, iso_data) in enumerate(isos.items(), 1):
            if iso_id in existing_iso_ids:
                logger.info(f"Skipping ISO {iso_data['name']} as template already exists")
                continue

            iso_name = iso_data['name'].lower()
            logger.info(f"Processing ISO {index}/{total_isos}: {iso_name}")

            # Keyword-based OS detection from the ISO file name.
            os_type = 'memtest' if 'memtest' in iso_name else \
                     'debian' if 'debian' in iso_name else \
                     'alpine' if 'alpine' in iso_name else 'default'
            if os_type == 'default':
                logger.warning(f"Unknown ISO type for {iso_name}, using default template configuration")

            # os_type is always a key of os_config_map, so index directly.
            config = os_config_map[os_type]

            template_id = f"tpl-{uuid.uuid4().hex[:8]}"
            template = {
                "id": template_id,
                "iso_id": iso_id,
                "createdAt": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "name": config['name'],
                "os": config['os'],
                "cpu": config['cpu'],
                "memory": config['memory'],
                "disk": config['disk'],
                "description": config['description'],
                "networkType": "nat",
                "graphics": "vnc",
                "bootOrder": ["cdrom", "hd"],
                "recommendedUsage": config['name']
            }

            templates.append(template)
            # Progress ramps linearly from 20% to 80% over the ISO list.
            self.update_state(state='PROGRESS', meta={
                'progress': 20 + (index / total_isos) * 60,
                'message': f'已处理 {index}/{total_isos} 个 ISO 文件: {iso_name}'
            })

        if templates:
            logger.info(f"Storing {len(templates)} new templates to Redis")
            self.update_state(state='PROGRESS', meta={'progress': 80, 'message': '存储模板到 Redis'})
            # Single round-trip for all new templates.
            pipeline = redis_conn.pipeline()
            for template in templates:
                pipeline.hset('templates', template['id'], json.dumps(template, ensure_ascii=False))
            pipeline.execute()
            logger.info(f"Stored {len(templates)} new templates to Redis")

        self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '模板更新完成'})
        logger.info(f"Updated {len(templates)} templates")
        return {
            "status": "success",
            "message": f"Updated {len(templates)} templates",
            "templates_updated": len(templates),
            "templates": templates
        }

    except Exception as e:
        logger.error(f"Failed to update templates: {str(e)}")
        self.update_state(state='FAILURE', meta={'message': f"模板更新失败: {str(e)}"})
        return {"status": "error", "message": f"Failed to update templates: {str(e)}"}

@celery.task(bind=True)
@log_task_execution("register_host")
@with_redis_retry(max_retries=5)
def register_host(self, role="worker"):
    """Register the current host in the Redis 'nodes' hash.

    If a live master already exists, a would-be master is demoted to worker;
    a stale or dangling master record is cleared and taken over instead.
    """
    # Tolerate callers that pass {"role": "..."} instead of a bare string.
    if isinstance(role, dict) and "role" in role:
        role = role["role"]
    if role not in ["master", "worker"]:
        logger.error(f"Invalid role: {role}")
        raise ValueError(f"Invalid role: {role}, must be 'master' or 'worker'")

    redis_conn = get_redis_client(role=role)
    hostname = socket.gethostname()
    node_id = f"node-{hostname}"
    ip = MultiNodeConfig.get_local_ip()

    if role == "master":
        current_master = redis_conn.get('master_node')
        if current_master and current_master != node_id:
            master_data = redis_conn.hget('nodes', current_master)
            if not master_data:
                # Dangling pointer: master_node names an unregistered node.
                logger.warning(f"Master {current_master} not found in nodes, clearing")
                redis_conn.delete('master_node')
            else:
                master = json.loads(master_data)
                heartbeat_age = time.time() - master['last_heartbeat']
                if heartbeat_age < MultiNodeConfig.HEARTBEAT_TIMEOUT:
                    logger.info(f"Master already exists: {current_master}, registering as worker")
                    role = "worker"
                else:
                    logger.warning(f"Existing master {current_master} is offline, taking over")
                    redis_conn.delete('master_node')

    # Sample local resource usage (cpu_percent blocks for its 1s interval).
    cpu_usage = psutil.cpu_percent(interval=1)
    memory_usage = psutil.virtual_memory().percent

    node_data = {
        "id": node_id,
        "name": hostname,
        "ip": ip,
        "status": "正常",
        "cpuUsage": cpu_usage,
        "memoryUsage": memory_usage,
        "role": role,
        "last_heartbeat": time.time()
    }
    redis_conn.hset('nodes', node_id, json.dumps(node_data, ensure_ascii=False))

    if role == "master":
        redis_conn.set('master_node', node_id)

    logger.info(f"Registered host as node: {node_id} with role: {role}, ip: {ip}")
    return {"status": "success", "node_id": node_id, "role": role}

@celery.task(bind=True)
@log_task_execution("update_node_metrics")
@with_redis_retry(max_retries=3)
def update_node_metrics(self):
    """Refresh this node's CPU/memory usage and heartbeat in Redis.

    If the node record is missing, attempts a synchronous re-registration
    (as a worker) before updating.

    Returns:
        dict with "status" and "node_id", or an error message.
    """
    redis_conn = get_redis_client(role="master")
    hostname = socket.gethostname()
    node_id = f"node-{hostname}"

    node_data = redis_conn.hget('nodes', node_id)
    if not node_data:
        logger.warning(f"Node {node_id} not found in Redis, attempting to re-register")
        result = register_host.apply(args=["worker"])
        if result.successful():
            logger.info(f"Node {node_id} re-registered successfully: {result.get()}")
            node_data = redis_conn.hget('nodes', node_id)
        else:
            # Bug fix: result.get() on a failed eager result re-raises the
            # task's exception (propagate=True by default), crashing this
            # task instead of returning the error dict. Use result.result
            # (the captured exception) for logging.
            logger.error(f"Failed to re-register node {node_id}: {result.result}")
            return {"status": "error", "message": f"Node {node_id} not found and re-registration failed"}
        if not node_data:
            # Bug fix: json.loads(None) would raise below if the record is
            # still missing after a "successful" re-registration.
            logger.error(f"Node {node_id} still missing after re-registration")
            return {"status": "error", "message": f"Node {node_id} not found after re-registration"}

    node = json.loads(node_data)
    try:
        cpu_usage = psutil.cpu_percent(interval=1)
        memory_info = psutil.virtual_memory()
        memory_usage = memory_info.percent
    except Exception as e:
        # Keep the previous readings rather than failing the heartbeat.
        logger.warning(f"psutil failed: {str(e)}, keeping previous values")
        cpu_usage = node.get('cpuUsage', 0.0)
        memory_usage = node.get('memoryUsage', 0.0)

    node['cpuUsage'] = cpu_usage
    node['memoryUsage'] = memory_usage
    node['last_heartbeat'] = time.time()
    redis_conn.hset('nodes', node_id, json.dumps(node, ensure_ascii=False))

    logger.info(f"Updated node {node_id}: cpu={cpu_usage}%, memory={memory_usage}%")
    return {"status": "success", "node_id": node_id}

@celery.task(bind=True)
@log_task_execution("check_master_status")
@with_redis_retry(max_retries=3)
def check_master_status(self):
    """Verify the recorded master node is alive; clear stale/invalid records."""
    conn = get_redis_client(role="master")
    master_id = conn.get('master_node')

    if not master_id:
        logger.warning("No master node found")
        return {"status": "success", "message": "No master node"}

    raw = conn.hget('nodes', master_id)
    if not raw:
        # master_node points at a node that is no longer registered.
        logger.warning(f"Master node {master_id} not found in nodes, clearing")
        conn.delete('master_node')
        return {"status": "success", "message": f"Cleared invalid master_node {master_id}"}

    heartbeat_age = time.time() - json.loads(raw).get('last_heartbeat', 0)
    if heartbeat_age > MultiNodeConfig.HEARTBEAT_TIMEOUT:
        logger.warning(f"Master node {master_id} is offline, clearing")
        conn.delete('master_node')
        return {"status": "success", "message": f"Cleared offline master_node {master_id}"}

    logger.info(f"Master node {master_id} is online")
    return {"status": "success", "message": f"Master node {master_id} is online"}

@celery.task(bind=True)
@log_task_execution("check_vm_status")
@with_redis_retry(max_retries=3)
def check_vm_status(self):
    """Poll libvirt for each VM's state and sync it back to Redis.

    Returns:
        dict with "status" and the number of VMs inspected.
    """
    # NOTE(review): virsh is invoked locally, without SSH — VMs hosted on
    # other nodes will fail the query here; confirm this task only runs
    # where the VMs live.
    redis_conn = get_redis_client(role="master")
    vms = [json.loads(v) for v in redis_conn.hgetall('vms').values()]
    for vm in vms:
        try:
            # Bug fix: check=True was missing, so virsh failures never raised
            # and the except branch below was dead code — a failed query
            # silently marked the VM "stopped". Now errors are logged and the
            # stored status is left untouched.
            result = subprocess.run(
                ["virsh", "domstate", vm["id"]],
                capture_output=True, text=True, check=True, timeout=15
            )
            status = result.stdout.strip()
            vm["status"] = "running" if status == "running" else "stopped"
            redis_conn.hset('vms', vm['id'], json.dumps(vm, ensure_ascii=False))
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
            logger.error(f"Failed to check VM {vm['id']} status: {str(e)}")
    return {"status": "success", "checked_vms": len(vms)}

@celery.task(bind=True)
@log_task_execution("clean_expired_tokens")
@with_redis_retry(max_retries=3)
def clean_expired_tokens(self):
    """Remove tokens whose 'expire' timestamp has passed from the 'tokens' hash.

    Returns:
        dict with "status" and the number of tokens removed.
    """
    redis_conn = get_redis_client(role="master")
    tokens = redis_conn.hgetall('tokens')
    current_time = time.time()
    expired_tokens = []
    for token, data in tokens.items():
        try:
            expire = json.loads(data).get('expire')
        except (ValueError, TypeError):
            # Bug fix: corrupt JSON used to abort the whole sweep; skip it.
            logger.warning(f"Skipping malformed token entry: {token}")
            continue
        # Tokens without an 'expire' field are kept — we cannot prove they
        # are stale (previously this raised KeyError and aborted the sweep).
        if expire is not None and expire < current_time:
            expired_tokens.append(token)
    if expired_tokens:
        # One HDEL with all fields instead of a round-trip per token.
        redis_conn.hdel('tokens', *expired_tokens)
    return {"status": "success", "cleaned_tokens": len(expired_tokens)}

@celery.task(bind=True)
@log_task_execution("update_vm_network_info")
@with_redis_retry(max_retries=3)
def update_vm_network_info(self, vm_id):
    """Discover a running VM's IP address and store it back in Redis.

    Looks up the VM's MAC via `virsh domiflist` on the host node (over SSH),
    then resolves the IP from the host's ARP table.

    Args:
        vm_id: key of the VM record in the Redis 'vms' hash.

    Returns:
        dict with "status" ("success"/"error") and details.
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '初始化网络信息查询'})
        logger.info(f"开始更新虚拟机 {vm_id} 的网络信息")

        redis_conn = get_redis_client(role="master")
        # '{}' fallback lets json.loads succeed when the VM record is missing.
        vm = json.loads(redis_conn.hget('vms', vm_id) or '{}')
        if not vm:
            logger.error(f"虚拟机 {vm_id} 未找到")
            return {"status": "error", "message": f"虚拟机 {vm_id} 未找到"}

        if vm["status"] != "running":
            logger.info(f"虚拟机 {vm_id} 未运行，无需更新网络信息")
            return {"status": "success", "message": f"虚拟机 {vm_id} 未运行"}

        # The VM's host node must be registered and healthy before we SSH to it.
        node_id = vm.get('node')
        node_data = json.loads(redis_conn.hget('nodes', node_id) or '{}')
        if not node_data or node_data.get('status') != '正常':
            logger.error(f"节点 {node_id} 无效或不可用")
            return {"status": "error", "message": f"节点 {node_id} 无效或不可用"}

        node_ip = node_data['ip']
        self.update_state(state='PROGRESS', meta={'progress': 50, 'message': '查询网络信息'})

        try:
            # Get the VM's MAC address: third column of the 'network' row in
            # `virsh domiflist`. With check=True, a no-match grep (exit 1)
            # raises CalledProcessError and is handled below.
            mac_cmd = f"virsh domiflist {vm_id} | grep network | awk '{{print $3}}'"
            result = subprocess.run(
                ["ssh", f"root@{node_ip}", mac_cmd],
                capture_output=True, text=True, check=True, timeout=10
            )
            mac_address = result.stdout.strip()
            if not mac_address:
                logger.warning(f"未找到虚拟机 {vm_id} 的 MAC 地址")
                return {"status": "error", "message": "未找到 MAC 地址"}

            # Resolve the IP from the host's ARP table; `tr` strips the
            # parentheses around the address in `arp -a` output.
            arp_cmd = f"arp -a | grep {mac_address} | awk '{{print $2}}' | tr -d '()'"
            result = subprocess.run(
                ["ssh", f"root@{node_ip}", arp_cmd],
                capture_output=True, text=True, check=True, timeout=10
            )
            ip_address = result.stdout.strip()
            if not ip_address:
                logger.warning(f"未找到虚拟机 {vm_id} 的 IP 地址")
                return {"status": "error", "message": "未找到 IP 地址"}

            # Persist the discovered IP address back into the VM record.
            vm["ip"] = ip_address
            redis_conn.hset('vms', vm_id, json.dumps(vm, ensure_ascii=False))
            logger.info(f"虚拟机 {vm_id} 的 IP 地址更新为 {ip_address}")

            self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '网络信息更新完成'})
            return {
                "status": "success",
                "vm_id": vm_id,
                "ip": ip_address,
                "message": f"虚拟机 {vm_id} 的网络信息更新完成"
            }

        except subprocess.CalledProcessError as e:
            logger.error(f"查询网络信息失败: {e.stderr}")
            return {"status": "error", "message": f"查询网络信息失败: {e.stderr}"}
        except subprocess.TimeoutExpired:
            logger.error("查询网络信息超时")
            return {"status": "error", "message": "查询网络信息超时"}

    except Exception as e:
        logger.error(f"更新虚拟机网络信息时发生错误: {str(e)}")
        return {"status": "error", "message": f"更新网络信息时发生错误: {str(e)}"}

@celery.task(bind=True)
@log_task_execution("vm_create_async")
@with_redis_retry(max_retries=3)
def create_vm_async(self, vm_config):
    """Create and boot a KVM virtual machine on a remote node via SSH.

    Workflow: validate the target node and ISO availability, create a qcow2
    disk, render libvirt domain XML, scp it to the node, `virsh define` +
    `virsh start`, persist the VM record to Redis, and schedule a delayed
    task to pick up the DHCP-assigned IP.

    Args:
        vm_config: dict; requires 'node' and 'iso'; optional 'name', 'cpu',
            'memory' (GiB), 'disk' (GB), 'networkType' ('nat'/'bridge'), 'ip'.

    Returns:
        dict with "status" ("success"/"error") plus details.
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '初始化'})
        logger.info(f"开始创建虚拟机: {vm_config}")

        redis_conn = get_redis_client(role="master")

        # ---- validate target node ----
        self.update_state(state='PROGRESS', meta={'progress': 10, 'message': '验证目标节点'})
        node_id = vm_config.get('node')
        if not node_id:
            logger.error("未指定目标节点")
            return {"status": "error", "message": "未指定目标节点"}

        node_data = json.loads(redis_conn.hget('nodes', node_id) or '{}')
        if not node_data or node_data.get('status') != '正常':
            logger.error(f"无效或不可用的节点: {node_id}")
            return {"status": "error", "message": f"无效或不可用的节点: {node_id}"}

        node_ip = node_data['ip']
        logger.info(f"目标节点: {node_id} ({node_ip})")

        # ---- validate ISO ----
        self.update_state(state='PROGRESS', meta={'progress': 20, 'message': '验证 ISO 文件'})
        iso_id = vm_config.get('iso')
        if not iso_id:
            logger.error("未指定 ISO 文件")
            return {"status": "error", "message": "未指定 ISO 文件"}

        iso_data = json.loads(redis_conn.hget('isos', iso_id) or '{}')
        if not iso_data:
            logger.error(f"未找到 ISO 文件: {iso_id}")
            return {"status": "error", "message": "未找到 ISO 文件"}

        iso_name = iso_data['name']
        iso_paths = iso_data.get('paths', {})
        if node_id not in iso_paths:
            logger.error(f"ISO 文件 {iso_name} 在节点 {node_id} 上不可用")
            return {"status": "error", "message": f"ISO 文件 {iso_name} 在节点 {node_id} 上不可用"}

        iso_path = iso_paths[node_id]
        logger.info(f"使用 ISO 文件: {iso_name} ({iso_path}) on node {node_id}")

        # Confirm the ISO file actually exists on the node before doing work.
        try:
            subprocess.run(
                ["ssh", f"root@{node_ip}", f"test -f {iso_path}"],
                check=True, capture_output=True, text=True, timeout=10
            )
            logger.info(f"ISO 文件在节点 {node_id} 上可访问")
        except subprocess.CalledProcessError:
            logger.error(f"节点 {node_id} 上的 ISO 文件不可访问: {iso_path}")
            return {"status": "error", "message": f"ISO 文件在节点 {node_id} 上不可访问"}
        except subprocess.TimeoutExpired:
            logger.error(f"验证 ISO 文件超时")
            return {"status": "error", "message": "验证 ISO 文件超时"}

        # ---- apply defaults ----
        vm_name = vm_config.get('name', f"vm-{str(uuid.uuid4())[:8]}")
        vm_config.setdefault('cpu', 2)
        vm_config.setdefault('memory', 4)
        vm_config.setdefault('disk', 50)
        vm_config['os'] = 'Linux'  # OS type is fixed to Linux
        vm_config.setdefault('networkType', 'nat')

        vm_id = f"vm-{str(uuid.uuid4())[:8]}"
        disk_path = f"/var/lib/libvirt/images/{vm_id}.qcow2"

        # ---- create backing disk ----
        self.update_state(state='PROGRESS', meta={'progress': 40, 'message': '创建虚拟磁盘'})
        try:
            # Bug fix: capture_output/text were missing, so e.stderr was
            # always None in the error log below.
            subprocess.run([
                "ssh", f"root@{node_ip}",
                "qemu-img", "create", "-f", "qcow2", disk_path, f"{vm_config['disk']}G"
            ], check=True, capture_output=True, text=True, timeout=300)
            logger.info(f"在节点 {node_id} 上创建磁盘成功: {disk_path}")
        except subprocess.CalledProcessError as e:
            logger.error(f"创建磁盘失败: {e.stderr}")
            return {"status": "error", "message": f"创建磁盘失败: {e.stderr}"}
        except subprocess.TimeoutExpired:
            logger.error(f"创建磁盘超时")
            return {"status": "error", "message": "创建磁盘超时"}

        # ---- render domain XML ----
        self.update_state(state='PROGRESS', meta={'progress': 60, 'message': '生成虚拟机配置'})

        # NOTE(review): a fully random first octet can yield multicast or
        # globally-administered MACs; consider forcing a locally-administered
        # unicast prefix (e.g. 52:54:00) — confirm before changing.
        mac_address = ':'.join(['%02x' % random.randint(0, 255) for _ in range(6)])
        if vm_config['networkType'] == 'bridge':
            if 'ip' in vm_config and vm_config['ip']:
                # User supplied a static IP.
                # NOTE(review): libvirt does not configure guest IPs via
                # <address> on an interface — verify this element has the
                # intended effect.
                network_config = f"""
                    <interface type='bridge'>
                        <source bridge='br0'/>
                        <mac address='{mac_address}'/>
                        <model type='virtio'/>
                        <address type='ipv4' value='{vm_config['ip']}'/>
                    </interface>"""
            else:
                # Bridged mode, relying on DHCP.
                network_config = f"""
                    <interface type='bridge'>
                        <source bridge='br0'/>
                        <mac address='{mac_address}'/>
                        <model type='virtio'/>
                    </interface>"""
        else:
            # NAT mode (libvirt 'default' network).
            network_config = f"""
                <interface type='network'>
                    <source network='default'/>
                    <mac address='{mac_address}'/>
                    <model type='virtio'/>
                </interface>"""

        vm_xml = f"""
        <domain type='kvm'>
            <name>{vm_id}</name>
            <uuid>{str(uuid.uuid4())}</uuid>
            <memory unit='GiB'>{vm_config['memory']}</memory>
            <vcpu>{vm_config['cpu']}</vcpu>
            <os>
                <type arch='x86_64' machine='pc'>hvm</type>
                <boot dev='cdrom'/>
            </os>
            <devices>
                <disk type='file' device='cdrom'>
                    <driver name='qemu' type='raw'/>
                    <source file='{iso_path}'/>
                    <target dev='hda' bus='ide'/>
                    <readonly/>
                </disk>
                <disk type='file' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source file='{disk_path}'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
                {network_config}
                <graphics type='vnc' port='-1' autoport='yes'/>
                <console type='pty'/>
            </devices>
        </domain>
        """

        # ---- define and start the domain on the remote node ----
        self.update_state(state='PROGRESS', meta={'progress': 80, 'message': '启动虚拟机'})

        with tempfile.NamedTemporaryFile(mode='w', suffix='.xml') as tmp_file:
            tmp_file.write(vm_xml)
            tmp_file.flush()

            try:
                # Bug fix: capture_output/text added throughout so e.stderr
                # below carries the actual remote error instead of None.
                subprocess.run([
                    "scp", tmp_file.name, f"root@{node_ip}:/tmp/{vm_id}.xml"
                ], check=True, capture_output=True, text=True, timeout=30)

                subprocess.run([
                    "ssh", f"root@{node_ip}",
                    "virsh", "define", f"/tmp/{vm_id}.xml"
                ], check=True, capture_output=True, text=True, timeout=30)

                subprocess.run([
                    "ssh", f"root@{node_ip}",
                    "virsh", "start", vm_id
                ], check=True, capture_output=True, text=True, timeout=30)

                subprocess.run([
                    "ssh", f"root@{node_ip}",
                    "rm", f"/tmp/{vm_id}.xml"
                ], check=True, capture_output=True, text=True, timeout=10)

            except subprocess.CalledProcessError as e:
                logger.error(f"定义或启动虚拟机失败: {e.stderr}")
                # Best-effort cleanup of the already-created disk.
                subprocess.run([
                    "ssh", f"root@{node_ip}",
                    "rm", "-f", disk_path
                ], check=False)
                return {"status": "error", "message": f"定义或启动虚拟机失败: {e.stderr}"}
            except subprocess.TimeoutExpired:
                logger.error("操作超时")
                # Robustness fix: also reclaim the disk on timeout, matching
                # the CalledProcessError path above (it used to leak).
                subprocess.run([
                    "ssh", f"root@{node_ip}",
                    "rm", "-f", disk_path
                ], check=False)
                return {"status": "error", "message": "操作超时"}

        # ---- persist the VM record ----
        self.update_state(state='PROGRESS', meta={'progress': 90, 'message': '保存虚拟机信息'})

        new_vm = {
            "id": vm_id,
            "name": vm_name,
            "status": "running",
            "node": node_id,
            "cpu": vm_config['cpu'],
            "memory": vm_config['memory'],
            "disk": vm_config['disk'],
            "ip": vm_config.get('ip', 'pending') if vm_config['networkType'] == 'bridge' and 'ip' in vm_config else 'pending',
            "os": 'Linux',  # OS type is fixed to Linux
            "iso_id": iso_id,
            "iso_path": iso_path,
            "mac_address": mac_address,
            "createdAt": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "lastStartedAt": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "isTemplate": False
        }
        with redis_conn.pipeline() as pipe:
            pipe.hset('vms', vm_id, json.dumps(new_vm, ensure_ascii=False))
            pipe.execute()

        # Delayed follow-up: wait ~30s for DHCP before resolving the IP.
        update_vm_network_info.apply_async(args=[vm_id], countdown=30)

        logger.info(f"虚拟机创建成功: {vm_id}")
        return {
            "status": "success",
            "progress": 100,
            "vm_id": vm_id,
            "message": "虚拟机创建成功",
            "data": {
                "name": vm_name,
                "node": node_id,
                "iso": iso_name
            }
        }

    except Exception as e:
        logger.error(f"创建虚拟机时发生意外错误: {str(e)}", exc_info=True)
        return {"status": "error", "message": f"创建虚拟机时发生意外错误: {str(e)}"}
    
@celery.task(bind=True)
@log_task_execution("vm_start_async")
@with_redis_retry(max_retries=3)
def start_vm_async(self, vm_id):
    """Start a stopped VM on its host node over SSH and update its record."""
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '初始化'})
        logger.info(f"开始启动虚拟机: {vm_id}")

        conn = get_redis_client(role="master")
        record = json.loads(conn.hget('vms', vm_id) or '{}')
        if not record:
            logger.error(f"虚拟机 {vm_id} 未找到")
            return {"status": "error", "message": f"虚拟机 {vm_id} 未找到"}
        if record["status"] == "running":
            logger.warning(f"虚拟机 {vm_id} 已在运行")
            return {"status": "error", "message": f"虚拟机 {vm_id} 已在运行"}

        host_node = record.get('node')
        node_info = json.loads(conn.hget('nodes', host_node) or '{}')
        if not node_info or node_info.get('status') != '正常':
            logger.error(f"节点 {host_node} 无效或不可用")
            return {"status": "error", "message": f"节点 {host_node} 无效或不可用"}

        host_ip = node_info['ip']
        logger.info(f"目标节点: {host_node} ({host_ip})")

        self.update_state(state='PROGRESS', meta={'progress': 50, 'message': '执行启动操作'})
        start_cmd = ["ssh", f"root@{host_ip}", "virsh", "start", vm_id]
        try:
            subprocess.run(start_cmd, check=True, capture_output=True, text=True, timeout=30)
        except subprocess.CalledProcessError as e:
            logger.error(f"启动虚拟机失败: {e.stderr}")
            return {"status": "error", "message": f"启动虚拟机失败: {e.stderr}"}
        except subprocess.TimeoutExpired:
            logger.error(f"启动虚拟机超时")
            return {"status": "error", "message": "启动虚拟机超时"}
        logger.info(f"虚拟机 {vm_id} 启动成功")

        self.update_state(state='PROGRESS', meta={'progress': 90, 'message': '更新虚拟机状态'})
        record["status"] = "running"
        record["lastStartedAt"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        record["ip"] = record.get("ip", "pending")  # keep any known IP, else mark pending
        conn.hset('vms', vm_id, json.dumps(record, ensure_ascii=False))

        # Give DHCP ~30s before asking the node for the guest's address.
        update_vm_network_info.apply_async(args=[vm_id], countdown=30)

        logger.info(f"虚拟机 {vm_id} 启动完成")
        return {
            "status": "success",
            "progress": 100,
            "vm_id": vm_id,
            "message": f"虚拟机 {vm_id} 启动成功"
        }

    except Exception as e:
        logger.error(f"启动虚拟机时发生意外错误: {str(e)}")
        return {"status": "error", "message": f"启动虚拟机时发生意外错误: {str(e)}"}

@celery.task(bind=True)
@log_task_execution("vm_stop_async")
@with_redis_retry(max_retries=3)
def stop_vm_async(self, vm_id):
    """Force-stop (virsh destroy) a running VM on its host node over SSH."""
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '初始化'})
        logger.info(f"开始强制停止虚拟机: {vm_id}")

        conn = get_redis_client(role="master")
        record = json.loads(conn.hget('vms', vm_id) or '{}')
        if not record:
            logger.error(f"虚拟机 {vm_id} 未找到")
            return {"status": "error", "message": f"虚拟机 {vm_id} 未找到"}
        if record["status"] != "running":
            logger.warning(f"虚拟机 {vm_id} 未运行")
            return {"status": "error", "message": f"虚拟机 {vm_id} 未运行"}

        host_node = record.get('node')
        node_info = json.loads(conn.hget('nodes', host_node) or '{}')
        if not node_info or node_info.get('status') != '正常':
            logger.error(f"节点 {host_node} 无效或不可用")
            return {"status": "error", "message": f"节点 {host_node} 无效或不可用"}

        host_ip = node_info['ip']
        logger.info(f"目标节点: {host_node} ({host_ip})")

        self.update_state(state='PROGRESS', meta={'progress': 50, 'message': '执行强制停止操作'})
        destroy_cmd = ["ssh", f"root@{host_ip}", "virsh", "destroy", vm_id]
        try:
            subprocess.run(destroy_cmd, check=True, capture_output=True, text=True, timeout=30)
        except subprocess.CalledProcessError as e:
            logger.error(f"强制停止虚拟机失败: {e.stderr}")
            return {"status": "error", "message": f"强制停止虚拟机失败: {e.stderr}"}
        except subprocess.TimeoutExpired:
            logger.error(f"强制停止虚拟机超时")
            return {"status": "error", "message": "强制停止虚拟机超时"}
        logger.info(f"虚拟机 {vm_id} 强制停止成功")

        self.update_state(state='PROGRESS', meta={'progress': 90, 'message': '更新虚拟机状态'})
        record["status"] = "stopped"
        record["ip"] = "N/A"  # clear the IP once the guest is down
        conn.hset('vms', vm_id, json.dumps(record, ensure_ascii=False))

        logger.info(f"虚拟机 {vm_id} 强制停止完成")
        return {
            "status": "success",
            "progress": 100,
            "vm_id": vm_id,
            "message": f"虚拟机 {vm_id} 强制停止成功"
        }

    except Exception as e:
        logger.error(f"强制停止虚拟机时发生意外错误: {str(e)}")
        return {"status": "error", "message": f"强制停止虚拟机时发生意外错误: {str(e)}"}

@celery.task(bind=True)
@log_task_execution("vm_delete_async")
@with_redis_retry(max_retries=3)
def delete_vm_async(self, vm_id):
    """Asynchronously delete a stopped VM.

    Undefines the libvirt domain on its host node over SSH, removes the
    backing qcow2 disk image, then drops the VM record from the Redis
    'vms' hash.

    Args:
        vm_id: VM identifier; also used as the libvirt domain name and the
            disk image basename.

    Returns:
        dict: {"status": "success"|"error", "message": ...}; success results
        additionally carry "progress" and "vm_id".
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '初始化'})
        logger.info(f"开始删除虚拟机: {vm_id}")

        redis_conn = get_redis_client(role="master")
        vm = json.loads(redis_conn.hget('vms', vm_id) or '{}')
        if not vm:
            logger.error(f"虚拟机 {vm_id} 未找到")
            return {"status": "error", "message": f"虚拟机 {vm_id} 未找到"}

        # Refuse to delete a running VM. Use .get() so a malformed record
        # without "status" doesn't raise KeyError into the generic handler.
        if vm.get("status") == "running":
            logger.error(f"虚拟机 {vm_id} 正在运行，请先停止")
            return {"status": "error", "message": f"虚拟机 {vm_id} 正在运行，请先停止"}

        node_id = vm.get('node')
        node_data = json.loads(redis_conn.hget('nodes', node_id) or '{}')
        if not node_data or node_data.get('status') != '正常':
            logger.error(f"节点 {node_id} 无效或不可用")
            return {"status": "error", "message": f"节点 {node_id} 无效或不可用"}

        node_ip = node_data['ip']
        logger.info(f"目标节点: {node_id} ({node_ip})")

        # Disk layout convention used elsewhere in this module: one qcow2
        # image per VM, named after vm_id.
        disk_path = f"/var/lib/libvirt/images/{vm_id}.qcow2"

        self.update_state(state='PROGRESS', meta={'progress': 50, 'message': '执行删除操作'})
        try:
            # Remove the domain definition first, then its disk; both must
            # succeed before the Redis record is dropped below.
            subprocess.run([
                "ssh", f"root@{node_ip}",
                "virsh", "undefine", vm_id
            ], check=True, capture_output=True, text=True, timeout=30)
            logger.info(f"虚拟机 {vm_id} 已取消定义")

            subprocess.run([
                "ssh", f"root@{node_ip}",
                "rm", "-f", disk_path
            ], check=True, capture_output=True, text=True, timeout=30)
            logger.info(f"虚拟机 {vm_id} 磁盘已删除")
        except subprocess.CalledProcessError as e:
            logger.error(f"删除虚拟机失败: {e.stderr}")
            return {"status": "error", "message": f"删除虚拟机失败: {e.stderr}"}
        except subprocess.TimeoutExpired:
            logger.error(f"删除虚拟机超时")
            return {"status": "error", "message": "删除虚拟机超时"}

        self.update_state(state='PROGRESS', meta={'progress': 90, 'message': '更新虚拟机状态'})
        redis_conn.hdel('vms', vm_id)

        logger.info(f"虚拟机 {vm_id} 删除完成")
        return {
            "status": "success",
            "progress": 100,
            "vm_id": vm_id,
            "message": f"虚拟机 {vm_id} 删除成功"
        }

    except Exception as e:
        logger.error(f"删除虚拟机时发生意外错误: {str(e)}")
        return {"status": "error", "message": f"删除虚拟机时发生意外错误: {str(e)}"}

@celery.task(bind=True)
@log_task_execution("vm_migrate_async")
@with_redis_retry(max_retries=3)
def migrate_vm_async(self, vm_id, targetNode, type):
    """Asynchronously migrate a VM to another node.

    Two modes, selected via *type*:
      - 'cold': VM must be stopped; the disk is copied with scp, the domain
        XML is re-defined on the target and undefined on the source.
      - 'live': VM must be running; uses `virsh migrate --live
        --copy-storage-all` so shared storage is not required.

    Args:
        vm_id: VM identifier / libvirt domain name.
        targetNode: destination node id (key in the Redis 'nodes' hash).
        type: 'cold' or 'live'. Name kept for caller compatibility even
            though it shadows the builtin.

    Raises:
        ValueError: on any validation failure or when an underlying
            command fails (original exception chained by the caller's
            error handling).
    """
    # Alias the parameter so the builtin `type` is not shadowed in the body.
    migration_type = type
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始迁移准备'})
        logger.info(f"开始迁移虚拟机 {vm_id} 到节点 {targetNode} ({migration_type}迁移)")

        redis_conn = get_redis_client(role="master")

        # Load and validate the VM record.
        vm_data = json.loads(redis_conn.hget('vms', vm_id) or '{}')
        if not vm_data:
            raise ValueError(f"虚拟机 {vm_id} 不存在")

        source_node_id = vm_data['node']
        if source_node_id == targetNode:
            raise ValueError("不能迁移到当前所在节点")

        # Load source/target node records.
        source_node = json.loads(redis_conn.hget('nodes', source_node_id) or '{}')
        target_node = json.loads(redis_conn.hget('nodes', targetNode) or '{}')

        if not source_node or not target_node:
            raise ValueError("源节点或目标节点不存在")

        # Target must be marked healthy before we try to push anything to it.
        if target_node.get('status') != '正常':
            raise ValueError("目标节点状态不正常")

        self.update_state(state='PROGRESS', meta={'progress': 20, 'message': '验证迁移条件'})

        if migration_type == 'cold':
            if vm_data['status'] == 'running':
                raise ValueError("冷迁移需要先停止虚拟机")

            self.update_state(state='PROGRESS', meta={'progress': 30, 'message': '执行冷迁移'})

            # 1. Resolve the VM's disk path on the source node.
            disk_path_cmd = f"virsh domblklist {vm_id} | grep vda | awk '{{print $2}}'"
            disk_path = subprocess.run(
                ["ssh", f"root@{source_node['ip']}", disk_path_cmd],
                capture_output=True, text=True, check=True
            ).stdout.strip()
            # Fail fast: if grep matched no 'vda' device the path is empty
            # and scp below would fail with a confusing error.
            if not disk_path:
                raise ValueError(f"无法获取虚拟机 {vm_id} 的磁盘路径")

            # 2. Copy the disk to the same path on the target node.
            scp_cmd = [
                "scp", "-C", "-r",
                f"root@{source_node['ip']}:{disk_path}",
                f"root@{target_node['ip']}:{disk_path}"
            ]
            # capture_output so CalledProcessError.stderr is populated for
            # the error handler below (it was None before).
            subprocess.run(scp_cmd, capture_output=True, text=True, check=True)

            # 3. Dump the domain XML from the source node.
            dumpxml_cmd = f"virsh dumpxml {vm_id}"
            vm_xml = subprocess.run(
                ["ssh", f"root@{source_node['ip']}", dumpxml_cmd],
                capture_output=True, text=True, check=True
            ).stdout

            # NOTE(review): this rewrites every occurrence of the source IP
            # in the XML, not just host-specific elements — confirm the XML
            # actually embeds node IPs and nothing else matches.
            vm_xml = vm_xml.replace(source_node['ip'], target_node['ip'])

            # Define the domain on the target, feeding the XML via stdin.
            subprocess.run(
                ["ssh", f"root@{target_node['ip']}", "virsh", "define", "/dev/stdin"],
                input=vm_xml, capture_output=True, text=True, check=True
            )

            # 4. Undefine the domain on the source node.
            subprocess.run(
                ["ssh", f"root@{source_node['ip']}", "virsh", "undefine", vm_id],
                capture_output=True, text=True, check=True
            )

        elif migration_type == 'live':
            if vm_data['status'] != 'running':
                raise ValueError("热迁移需要虚拟机处于运行状态")

            self.update_state(state='PROGRESS', meta={'progress': 30, 'message': '执行热迁移'})

            # --copy-storage-all supports non-shared storage; --persistent
            # keeps the domain defined on the target after migration.
            migrate_cmd = [
                "virsh", "migrate", "--live", vm_id,
                f"qemu+ssh://root@{target_node['ip']}/system",
                "--copy-storage-all", "--unsafe", "--persistent"
            ]
            subprocess.run(
                ["ssh", f"root@{source_node['ip']}"] + migrate_cmd,
                capture_output=True, text=True, check=True
            )

        else:
            raise ValueError("无效的迁移类型")

        # Point the record at the new node; the IP becomes known again once
        # the follow-up network task runs.
        vm_data['node'] = targetNode
        vm_data['ip'] = 'pending'
        redis_conn.hset('vms', vm_id, json.dumps(vm_data, ensure_ascii=False))

        # Schedule IP re-discovery after the guest settles.
        update_vm_network_info.apply_async(args=[vm_id], countdown=30)

        self.update_state(state='PROGRESS', meta={'progress': 90, 'message': '更新系统状态'})

        # Append an audit-log entry for the migration.
        redis_conn.lpush('logs', json.dumps({
            "id": str(uuid.uuid4()),
            "type": "operation",
            "action": "vm_migrate",
            "vm_id": vm_id,
            "from_node": source_node_id,
            "to_node": targetNode,
            "migration_type": migration_type,
            "timestamp": time.time(),
            "user": "system",
            "status": "success"
        }, ensure_ascii=False))

        self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '迁移完成'})
        return {
            "status": "success",
            "message": f"虚拟机 {vm_id} 迁移到节点 {targetNode} 成功",
            "vm_id": vm_id,
            "new_node": targetNode
        }

    except subprocess.CalledProcessError as e:
        # capture_output=True on every run() above guarantees e.stderr is a
        # string here rather than None.
        error_msg = f"迁移失败: {e.stderr.strip() if e.stderr else str(e)}"
        logger.error(error_msg)
        raise ValueError(error_msg)
    except Exception as e:
        logger.error(f"迁移过程中发生错误: {str(e)}")
        raise

@celery.task(bind=True)
@log_task_execution("generate_system_report")
@with_redis_retry(max_retries=3)
def generate_system_report(self):
    """Build a point-in-time summary of users, VMs, tokens, alarms and nodes.

    Reads the corresponding Redis hashes, computes counts, and stores the
    summary in the 'reports' hash keyed by its ISO timestamp.

    Returns:
        dict: {"status": "success", "report": <summary dict>}.
    """
    redis_conn = get_redis_client(role="master")
    users = [json.loads(v) for v in redis_conn.hgetall('users').values()]
    vms = [json.loads(v) for v in redis_conn.hgetall('vms').values()]
    tokens = redis_conn.hgetall('tokens')
    alarms = [json.loads(v) for v in redis_conn.hgetall('alarms').values()]
    nodes = [json.loads(v) for v in redis_conn.hgetall('nodes').values()]

    report = {
        "timestamp": datetime.now().isoformat(),
        "total_users": len(users),
        "active_users": len([u for u in users if u.get('status') == 'ACTIVE']),
        "total_vms": len(vms),
        "running_vms": len([v for v in vms if v.get('status') == 'running']),
        "active_tokens": len(tokens),
        "recent_alarms": len(alarms),
        # .get() keeps a node record missing "status" from crashing the
        # whole report; sibling tasks in this file use the same guard.
        "active_nodes": len([n for n in nodes if n.get('status') == '正常'])
    }

    redis_conn.hset('reports', report['timestamp'], json.dumps(report, ensure_ascii=False))
    logger.info(f"System report generated: {report['timestamp']}")
    return {"status": "success", "report": report}

@celery.task(bind=True)
@log_task_execution("update_iso_paths")
@with_redis_retry(max_retries=3)
def update_iso_paths(self):
    """Periodically rescan each node's ISO directories and rebuild the Redis
    'isos' hash, merging entries that share the same file name.

    Each ISO record maps node ids to the path of that file on the node, so a
    logical ISO present on several nodes is stored once under one id.

    Returns:
        dict: success payload with counters, or {"status": "error", ...}.
    """
    self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始更新ISO路径'})
    redis_conn = get_redis_client(role="master")
    iso_dirs = [MultiNodeConfig.ISO_DIR1, MultiNodeConfig.ISO_DIR2]
    nodes = [json.loads(v) for v in redis_conn.hgetall('nodes').values()]

    # Group existing ISO records by file name, merging duplicate records so
    # their per-node path maps are combined under a single id.
    existing_isos = {}
    for iso_id, iso_data in redis_conn.hgetall('isos').items():
        iso = json.loads(iso_data)
        if iso['name'] not in existing_isos:
            existing_isos[iso['name']] = {
                "id": iso_id,
                "name": iso['name'],
                "paths": iso.get('paths', {}),
                "createdAt": iso.get('createdAt', datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            }
        else:
            # Duplicate name: fold its paths into the first record seen.
            existing_isos[iso['name']]['paths'].update(iso.get('paths', {}))

    updated_isos = {}
    total_nodes = len(nodes)

    try:
        for index, node in enumerate(nodes, 1):
            node_id = node['id']
            node_ip = node['ip']
            # Scanning occupies the 20-80% band of the progress bar.
            progress = 20 + (index / total_nodes) * 60
            self.update_state(state='PROGRESS', meta={
                'progress': progress,
                'message': f'扫描节点 {node_id} ({node_ip})'
            })

            # Skip nodes that do not answer a single ping.
            try:
                subprocess.run(
                    ["ping", "-c", "1", node_ip],
                    capture_output=True, text=True, check=True, timeout=5
                )
            except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
                logger.warning(f"节点 {node_id} ({node_ip}) 不可达: {str(e)}")
                continue

            # List each configured ISO directory over SSH.
            for iso_dir in iso_dirs:
                try:
                    result = subprocess.run(
                        ["ssh", "-o", "StrictHostKeyChecking=no",
                         f"root@{node_ip}", "ls", iso_dir],
                        capture_output=True, text=True, check=True, timeout=10
                    )
                    files = [f for f in result.stdout.strip().split('\n') if f.endswith('.iso')]

                    for filename in files:
                        iso_path = os.path.join(iso_dir, filename)

                        if filename not in updated_isos:
                            # Reuse the existing record id for this name, if
                            # any, so external references stay valid.
                            iso_id = next(
                                (iso['id'] for iso in existing_isos.values()
                                 if iso['name'] == filename),
                                str(uuid.uuid4())
                            )
                            updated_isos[filename] = {
                                "id": iso_id,
                                "name": filename,
                                "paths": {},
                                "createdAt": existing_isos.get(filename, {}).get(
                                    'createdAt',
                                    datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                                )
                            }
                            # Carry over paths recorded for other nodes; the
                            # current node's entry is refreshed by this scan.
                            if filename in existing_isos:
                                for nid, path in existing_isos[filename]['paths'].items():
                                    if nid != node_id:
                                        updated_isos[filename]['paths'][nid] = path

                        updated_isos[filename]['paths'][node_id] = iso_path
                        # Fixed: previously logged the literal "(unknown)"
                        # instead of the file name.
                        logger.debug(f"Found ISO: {filename} at {node_id}:{iso_path}")

                except subprocess.CalledProcessError as e:
                    logger.warning(f"扫描 {iso_dir} 失败 (节点 {node_id}): {e.stderr}")
                except Exception as e:
                    logger.error(f"扫描 {iso_dir} 时发生意外错误 (节点 {node_id}): {str(e)}")

        self.update_state(state='PROGRESS', meta={
            'progress': 90,
            'message': '保存ISO路径信息'
        })

        # Replace the whole hash in one pipeline so stale entries disappear
        # and readers never see a partially-written mix.
        pipeline = redis_conn.pipeline()
        pipeline.delete('isos')
        for iso in updated_isos.values():
            pipeline.hset('isos', iso['id'], json.dumps(iso, ensure_ascii=False))
        pipeline.execute()

        self.update_state(state='SUCCESS', meta={
            'progress': 100,
            'message': f'更新完成，共处理 {len(updated_isos)} 个ISO文件'
        })
        return {
            "status": "success",
            "message": f"Updated {len(updated_isos)} ISO files",
            "isos_updated": len(updated_isos),
            "unique_isos": len({iso['name'] for iso in updated_isos.values()})
        }

    except Exception as e:
        logger.error(f"更新ISO路径失败: {str(e)}", exc_info=True)
        self.update_state(state='FAILURE', meta={
            'message': f"ISO路径更新失败: {str(e)}"
        })
        return {
            "status": "error",
            "message": f"Failed to update ISO paths: {str(e)}"
        }

@celery.task(bind=True)
@log_task_execution("scan_and_init_vms")
@with_redis_retry(max_retries=3)
def scan_and_init_vms(self):
    """Scan every node over SSH and (re)register its ISO files and VMs.

    For each node in the Redis 'nodes' hash: lists the configured ISO
    directories, parses `virsh list --all`, writes the discovered ISO and VM
    records back to Redis, and schedules network-info updates for running VMs.

    Returns:
        dict: scan counters on success, or {"status": "error", ...}.
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始扫描节点'})
        redis_conn = get_redis_client(role="master")

        nodes = [json.loads(v) for v in redis_conn.hgetall('nodes').values()]
        if not nodes:
            logger.warning("No nodes found in Redis")
            return {"status": "error", "message": "No nodes available"}

        iso_dirs = [MultiNodeConfig.ISO_DIR1, MultiNodeConfig.ISO_DIR2]
        isos = {}  # filename -> ISO record, paths merged across nodes
        vms = {}   # vm_id -> VM record

        for index, node in enumerate(nodes, 1):
            node_id = node['id']
            node_ip = node['ip']

            progress = int((index / len(nodes)) * 100)
            self.update_state(
                state='PROGRESS',
                meta={
                    'progress': progress,
                    'message': f'扫描节点 {node_id} ({node_ip})'
                }
            )

            try:
                # 1. ISO files in each configured directory.
                for iso_dir in iso_dirs:
                    try:
                        result = subprocess.run(
                            ["ssh", "-o", "StrictHostKeyChecking=no",
                             f"root@{node_ip}", "ls", iso_dir],
                            capture_output=True, text=True, check=True,
                            timeout=10
                        )
                        files = result.stdout.strip().split('\n')

                        for filename in files:
                            if filename and filename.endswith('.iso'):
                                iso_path = os.path.join(iso_dir, filename)
                                if filename not in isos:
                                    isos[filename] = {
                                        "id": str(uuid.uuid4()),
                                        "name": filename,
                                        "paths": {},
                                        "createdAt": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                                    }
                                isos[filename]["paths"][node_id] = iso_path
                                logger.info(f"Found ISO on {node_id}: {iso_path}")
                    except subprocess.CalledProcessError as e:
                        logger.error(f"Failed to scan {iso_dir} on {node_id}: {e.stderr}")

                # 2. Virtual machines reported by libvirt.
                try:
                    result = subprocess.run(
                        ["ssh", "-o", "StrictHostKeyChecking=no",
                         f"root@{node_ip}", "virsh", "list", "--all"],
                        capture_output=True, text=True, check=True,
                        timeout=15
                    )
                    lines = result.stdout.strip().split('\n')[2:]  # skip header rows

                    for line in lines:
                        parts = line.split()
                        # Columns are "Id Name State..."; the state can be
                        # multi-word ("shut off"), so join the remainder
                        # instead of taking one token (the old code stored
                        # just "shut"). Blank/short lines are skipped, which
                        # also fixes a latent IndexError.
                        if len(parts) < 3:
                            continue
                        vm_id = parts[1]
                        vm_status = ' '.join(parts[2:])

                        try:
                            # Probe dominfo so VMs we cannot query are
                            # skipped (check=True raises on failure); the
                            # output itself is not needed.
                            subprocess.run(
                                ["ssh", "-o", "StrictHostKeyChecking=no",
                                 f"root@{node_ip}", "virsh", "dominfo", vm_id],
                                capture_output=True, text=True, check=True,
                                timeout=10
                            )

                            # MAC address of the first 'network' interface.
                            mac_cmd = f"virsh domiflist {vm_id} | grep network | awk '{{print $3}}'"
                            mac_result = subprocess.run(
                                ["ssh", f"root@{node_ip}", mac_cmd],
                                capture_output=True, text=True, check=True, timeout=10
                            )
                            mac_address = mac_result.stdout.strip() or "unknown"

                            vm_info = {
                                "id": vm_id,
                                "name": vm_id,
                                "status": vm_status.lower(),
                                "node": node_id,
                                "cpu": 2,       # defaults; real values should come from the domain XML
                                "memory": 2048,
                                "disk": 20,
                                "ip": "pending",
                                "os": "Linux",  # OS type fixed to Linux
                                "mac_address": mac_address,
                                "createdAt": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                                "lastStartedAt": "",
                                "isTemplate": False
                            }
                            vms[vm_id] = vm_info
                        except subprocess.CalledProcessError as e:
                            logger.error(f"Failed to get info for VM {vm_id} on {node_id}: {e.stderr}")
                except subprocess.CalledProcessError as e:
                    logger.error(f"Failed to list VMs on {node_id}: {e.stderr}")

            except Exception as e:
                logger.error(f"Error scanning node {node_id}: {str(e)}")
                continue

        # Persist everything in a single pipeline round-trip.
        pipeline = redis_conn.pipeline()
        for iso in isos.values():
            pipeline.hset('isos', iso['id'], json.dumps(iso, ensure_ascii=False))
        for vm_id, vm_info in vms.items():
            pipeline.hset('vms', vm_id, json.dumps(vm_info, ensure_ascii=False))
        pipeline.execute()

        # Kick off IP discovery for running VMs.
        for vm_id, vm_info in vms.items():
            if vm_info["status"] == "running":
                update_vm_network_info.apply_async(args=[vm_id], countdown=30)

        logger.info(f"Scanned {len(nodes)} nodes, found {len(isos)} ISOs and {len(vms)} VMs")
        return {
            "status": "success",
            "nodes_scanned": len(nodes),
            "isos_found": len(isos),
            "vms_found": len(vms)
        }

    except Exception as e:
        logger.error(f"Failed to scan and init VMs: {str(e)}")
        return {"status": "error", "message": str(e)}


@celery.task(bind=True)
@log_task_execution("check_network_connectivity")
@with_redis_retry(max_retries=3)
def check_network_connectivity(self):
    """Check network connectivity of all nodes and raise alarms.

    NOTE(review): a second function with this exact name (and the same task
    decorator name) is defined later in this module; at import time that
    later definition replaces this one, so this implementation appears to be
    dead code — confirm and remove one of the two.

    Pings every node from the Redis 'nodes' hash, records per-node delay and
    packet loss into the 'network_status' key, and writes alarm records into
    the 'alarms' hash for unreachable or degraded nodes.
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始网络连通性检测'})
        redis_conn = get_redis_client(role="master")
        nodes = [json.loads(v) for v in redis_conn.hgetall('nodes').values()]
        total_nodes = len(nodes)
        network_status = {}
        
        for index, node in enumerate(nodes, 1):
            node_id = node['id']
            node_ip = node['ip']
            progress = (index / total_nodes) * 100
            self.update_state(state='PROGRESS', meta={
                'progress': progress,
                'message': f'检测节点 {node_id} ({node_ip}) 的网络连通性'
            })
            
            try:
                # Probe latency with ping3; None means no reply within timeout.
                delay = ping(node_ip, timeout=2)
                if delay is None:
                    logger.warning(f"节点 {node_id} ({node_ip}) 无响应")
                    network_status[node_id] = {
                        "status": "unreachable",
                        "delay": None,
                        "packet_loss": 100.0
                    }
                    # Raise a "network down" alarm for the unreachable node.
                    alarm = {
                        "id": str(uuid.uuid4()),
                        "type": "网络中断",
                        "node": node_id,
                        "timestamp": time.time(),
                        "status": "unprocessed",
                        "content": f"节点 {node_id} ({node_ip}) 网络连接中断"
                    }
                    redis_conn.hset('alarms', alarm['id'], json.dumps(alarm, ensure_ascii=False))
                else:
                    # Estimate packet loss with 4 extra probes (25% each).
                    packet_loss = 0.0
                    for _ in range(4):
                        if ping(node_ip, timeout=2) is None:
                            packet_loss += 25.0
                    network_status[node_id] = {
                        "status": "reachable",
                        "delay": round(delay * 1000, 2),  # seconds -> milliseconds
                        "packet_loss": packet_loss
                    }
                    # Alarm when loss exceeds 50% or latency exceeds 500 ms.
                    if packet_loss > 50 or delay * 1000 > 500:
                        alarm = {
                            "id": str(uuid.uuid4()),
                            "type": "网络异常",
                            "node": node_id,
                            "timestamp": time.time(),
                            "status": "unprocessed",
                            "content": f"节点 {node_id} ({node_ip}) 网络异常，延迟: {round(delay * 1000, 2)}ms，丢包率: {packet_loss}%"
                        }
                        redis_conn.hset('alarms', alarm['id'], json.dumps(alarm, ensure_ascii=False))
                    logger.info(f"节点 {node_id} ({node_ip}) 网络状态: 延迟={round(delay * 1000, 2)}ms, 丢包率={packet_loss}%")
            
            except Exception as e:
                logger.error(f"检测节点 {node_id} ({node_ip}) 网络连通性失败: {str(e)}")
                network_status[node_id] = {
                    "status": "error",
                    "delay": None,
                    "packet_loss": None
                }
                alarm = {
                    "id": str(uuid.uuid4()),
                    "type": "网络检测失败",
                    "node": node_id,
                    "timestamp": time.time(),
                    "status": "unprocessed",
                    "content": f"检测节点 {node_id} ({node_ip}) 网络连通性失败: {str(e)}"
                }
                redis_conn.hset('alarms', alarm['id'], json.dumps(alarm, ensure_ascii=False))
        
        # Persist the whole snapshot under one Redis key.
        redis_conn.set('network_status', json.dumps(network_status, ensure_ascii=False))
        
        self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '网络连通性检测完成'})
        return {
            "status": "success",
            "message": f"Checked network connectivity for {total_nodes} nodes",
            "network_status": network_status
        }
    
    except Exception as e:
        logger.error(f"网络连通性检测失败: {str(e)}")
        self.update_state(state='FAILURE', meta={'message': f"网络连通性检测失败: {str(e)}"})
        return {"status": "error", "message": f"Failed to check network connectivity: {str(e)}"}

def _sample_node_latency(node_ip, ping_count, ping_timeout):
    """Send *ping_count* ICMP probes to *node_ip* and aggregate the results.

    Returns a tuple ``(successful_pings, total_latency_ms, min_latency_ms,
    max_latency_ms)``. When no probe succeeds, min is ``float('inf')`` and
    max is ``0.0`` — callers must gate on ``successful_pings > 0``.
    """
    successful_pings = 0
    total_latency = 0.0
    min_latency = float('inf')
    max_latency = 0.0

    for _ in range(ping_count):
        try:
            latency = ping(node_ip, timeout=ping_timeout, unit='ms')
        except Exception as e:
            logger.debug(f"Ping to {node_ip} failed: {str(e)}")
            continue
        # ping3 returns None on timeout but False on errors such as an
        # unresolvable host or missing raw-socket privilege. Checking only
        # `is not None` would count False (== 0) as a successful 0 ms reply
        # and skew both packet loss and latency figures.
        if latency is None or latency is False:
            continue
        successful_pings += 1
        total_latency += latency
        min_latency = min(min_latency, latency)
        max_latency = max(max_latency, latency)

    return successful_pings, total_latency, min_latency, max_latency


def _raise_network_alarm(redis_conn, alarm, level, node_id, node_ip):
    """Persist *alarm* in the Redis 'alarms' hash and broadcast it via SSE.

    *level* is the SSE severity string (e.g. "critical" / "warning").
    """
    redis_conn.hset('alarms', alarm['id'], json.dumps(alarm, ensure_ascii=False))
    sse.publish({
        "type": "network-alert",
        "message": alarm['content'],
        "level": level,
        "timestamp": alarm['timestamp'],
        "node_id": node_id,
        "node_ip": node_ip
    }, type='network-alert')


@celery.task(bind=True)
@log_task_execution("check_network_connectivity")
@with_redis_retry(max_retries=3)
def check_network_connectivity(self):
    """Periodically probe every registered node's network reachability.

    For each node stored in the Redis 'nodes' hash, sends a burst of ICMP
    pings, derives status / packet loss / min-avg-max latency, writes the
    stats back onto the node record, stores an aggregate snapshot under the
    'network_status' key, and raises alarms (Redis + SSE) for offline nodes,
    high packet loss, or high average latency.

    Returns a summary dict: status, message, per-node network_status, and
    aggregate stats. On a top-level failure returns a dict with
    status='error' instead of raising.
    """
    try:
        self.update_state(state='PROGRESS', meta={'progress': 0, 'message': '开始网络连通性检测'})
        redis_conn = get_redis_client(role="master")
        nodes = [json.loads(v) for v in redis_conn.hgetall('nodes').values()]
        total_nodes = len(nodes)
        network_status = {}

        # Probe parameters and alarm thresholds.
        PING_COUNT = 4                    # probes per node
        PING_TIMEOUT = 2                  # seconds per probe
        HIGH_LATENCY_THRESHOLD = 500      # ms, average latency alarm
        HIGH_PACKET_LOSS_THRESHOLD = 50   # percent, packet loss alarm

        for index, node in enumerate(nodes, 1):
            node_id = node['id']
            node_ip = node['ip']
            node_name = node.get('name', node_id)
            self.update_state(state='PROGRESS', meta={
                'progress': (index / total_nodes) * 100,
                'message': f'检测节点 {node_name} ({node_ip}) 的网络连通性'
            })

            try:
                successful_pings, total_latency, min_latency, max_latency = \
                    _sample_node_latency(node_ip, PING_COUNT, PING_TIMEOUT)

                packet_loss = ((PING_COUNT - successful_pings) / PING_COUNT) * 100
                status = 'offline' if successful_pings == 0 else 'online'
                avg_latency = total_latency / successful_pings if successful_pings > 0 else None

                # Stats merged onto the stored node record.
                node_stats = {
                    'networkStatus': status,
                    'lastCheck': time.time(),
                    'packetLoss': round(packet_loss, 2),
                }
                # `is not None` (not truthiness): a 0.0 ms average is valid data.
                if avg_latency is not None:
                    node_stats.update({
                        'latency': round(avg_latency, 2),
                        'minLatency': round(min_latency, 2),
                        'maxLatency': round(max_latency, 2),
                        'avgLatency': round(avg_latency, 2),
                    })

                node.update(node_stats)
                redis_conn.hset('nodes', node_id, json.dumps(node, ensure_ascii=False))

                # Snapshot consumed by the status API.
                network_status[node_id] = {
                    "node_id": node_id,
                    "status": status,
                    "latency": round(avg_latency, 2) if avg_latency is not None else None,
                    "min_latency": round(min_latency, 2) if successful_pings > 0 else None,
                    "max_latency": round(max_latency, 2) if successful_pings > 0 else None,
                    "avg_latency": round(avg_latency, 2) if avg_latency is not None else None,
                    "packet_loss": round(packet_loss, 2),
                    "last_check": node_stats['lastCheck']
                }

                # Alarm priority: offline > high packet loss > high latency.
                if status == 'offline':
                    _raise_network_alarm(redis_conn, {
                        "id": str(uuid.uuid4()),
                        "type": "网络中断",
                        "node": node_name,
                        "timestamp": time.time(),
                        "status": "unprocessed",
                        "content": f"节点 {node_name} ({node_ip}) 网络连接中断"
                    }, "critical", node_id, node_ip)
                elif packet_loss >= HIGH_PACKET_LOSS_THRESHOLD:
                    _raise_network_alarm(redis_conn, {
                        "id": str(uuid.uuid4()),
                        "type": "网络异常",
                        "node": node_name,
                        "timestamp": time.time(),
                        "status": "unprocessed",
                        "content": f"节点 {node_name} ({node_ip}) 网络丢包率高: {round(packet_loss, 2)}%"
                    }, "warning", node_id, node_ip)
                elif avg_latency is not None and avg_latency >= HIGH_LATENCY_THRESHOLD:
                    _raise_network_alarm(redis_conn, {
                        "id": str(uuid.uuid4()),
                        "type": "网络延迟",
                        "node": node_name,
                        "timestamp": time.time(),
                        "status": "unprocessed",
                        "content": f"节点 {node_name} ({node_ip}) 网络延迟高: 平均 {round(avg_latency, 2)}ms"
                    }, "warning", node_id, node_ip)

                logger.info(
                    f"节点 {node_name} ({node_ip}) 网络状态: "
                    f"状态={status}, "
                    f"丢包率={round(packet_loss, 2)}%, "
                    f"平均延迟={round(avg_latency, 2) if avg_latency is not None else 'N/A'}ms, "
                    f"最小延迟={round(min_latency, 2) if successful_pings > 0 else 'N/A'}ms, "
                    f"最大延迟={round(max_latency, 2) if successful_pings > 0 else 'N/A'}ms"
                )

            except Exception as e:
                # Per-node failure: record an error entry and alarm, but keep
                # checking the remaining nodes.
                logger.error(f"检测节点 {node_name} ({node_ip}) 网络连通性失败: {str(e)}")
                network_status[node_id] = {
                    "node_id": node_id,
                    "status": "error",
                    "latency": None,
                    "min_latency": None,
                    "max_latency": None,
                    "avg_latency": None,
                    "packet_loss": None,
                    "last_check": time.time()
                }
                alarm = {
                    "id": str(uuid.uuid4()),
                    "type": "网络检测失败",
                    "node": node_name,
                    "timestamp": time.time(),
                    "status": "unprocessed",
                    "content": f"检测节点 {node_name} ({node_ip}) 网络连通性失败: {str(e)}"
                }
                redis_conn.hset('alarms', alarm['id'], json.dumps(alarm, ensure_ascii=False))

        # Publish the whole snapshot for API consumers.
        redis_conn.set('network_status', json.dumps(network_status, ensure_ascii=False))

        self.update_state(state='SUCCESS', meta={'progress': 100, 'message': '网络连通性检测完成'})
        return {
            "status": "success",
            "message": f"已检测 {total_nodes} 个节点的网络连通性",
            "network_status": network_status,
            "stats": {
                "online_nodes": sum(1 for s in network_status.values() if s.get('status') == 'online'),
                "offline_nodes": sum(1 for s in network_status.values() if s.get('status') == 'offline'),
                "error_nodes": sum(1 for s in network_status.values() if s.get('status') == 'error'),
                # max(1, ...) guards the division when no node produced data.
                "avg_packet_loss": round(
                    sum(s.get('packet_loss', 0) for s in network_status.values() if s.get('packet_loss') is not None) /
                    max(1, sum(1 for s in network_status.values() if s.get('packet_loss') is not None)), 2
                ),
                "avg_latency": round(
                    sum(s.get('avg_latency', 0) for s in network_status.values() if s.get('avg_latency') is not None) /
                    max(1, sum(1 for s in network_status.values() if s.get('avg_latency') is not None)), 2
                )
            }
        }

    except Exception as e:
        logger.error(f"网络连通性检测失败: {str(e)}", exc_info=True)
        self.update_state(state='FAILURE', meta={'message': f"网络连通性检测失败: {str(e)}"})
        return {
            "status": "error",
            "message": f"网络连通性检测失败: {str(e)}",
            "error": str(e)
        }