import argparse
import socket
import time
from celery_app import celery, load_config
from tasks import register_host, get_redis_client
from app import app, init_redis_data
from config import MultiNodeConfig
import logging
import subprocess
import os
from celery.result import AsyncResult

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def start_master():
    """Start the master node.

    Verifies Redis connectivity, claims/validates the master role, registers
    this host, spawns Celery worker and beat subprocesses, seeds Redis data,
    triggers one synchronous VM scan, then serves the Flask app in the
    foreground. The Celery subprocesses are terminated and reaped when the
    Flask server exits (or when any later startup step raises).

    Raises:
        RuntimeError: if Redis is unreachable, a different master node is
            already active, registration fails after all retries, or Redis
            data initialization fails.
    """
    load_config(role="master")

    logger.info("Starting master node")
    redis_host = MultiNodeConfig.get_redis_host(role="master")
    redis_client = get_redis_client(role="master")

    try:
        redis_client.ping()
        logger.info(f"Redis connection successful at {redis_host}:{MultiNodeConfig.REDIS_PORT}")
    except Exception as e:
        logger.error(f"Failed to connect to Redis: {str(e)}")
        raise RuntimeError(f"Failed to connect to Redis: {str(e)}")

    hostname = socket.gethostname()
    node_id = f"node-{hostname}"
    current_master = redis_client.get('master_node')
    # redis-py returns bytes unless the client was built with
    # decode_responses=True; normalize so the conflict check below cannot
    # spuriously flag this node's own prior registration as "another master".
    if isinstance(current_master, bytes):
        current_master = current_master.decode()

    if current_master and current_master != node_id:
        logger.error(f"Another master node is active: {current_master}")
        raise RuntimeError(f"Master node conflict: {current_master} is already active")

    # Publish our IP so worker nodes can discover the master via Redis.
    redis_client.set('master_ip', redis_host)
    logger.info(f"Stored master_ip: {redis_host}")

    # Register the master node, retrying transient failures.
    max_retries = 3
    for attempt in range(max_retries):
        try:
            task = register_host.apply(args=["master"])
            result = task.get(timeout=10)
            logger.info(f"Master node registered successfully: {result}")
            break
        except Exception as e:
            logger.warning(f"Failed to register master node (attempt {attempt + 1}): {str(e)}")
            if attempt == max_retries - 1:
                logger.error("All attempts to register master node failed")
                raise RuntimeError(f"Failed to register master node: {str(e)}")
            time.sleep(2)

    # Both Celery subprocesses inherit our environment plus the resolved
    # broker/backend URLs so they talk to the same Redis instance.
    child_env = {
        **os.environ,
        "CELERY_BROKER_URL": celery.conf.broker_url,
        "CELERY_RESULT_BACKEND": celery.conf.result_backend
    }

    # Start the Celery worker.
    logger.info("Starting Celery Worker...")
    celery_worker = subprocess.Popen([
        "celery", "-A", "celery_app", "worker", "--loglevel=info",
        "--concurrency=4", "--queues=high_priority,default"
    ], env=child_env)

    # Start Celery beat (periodic task scheduler).
    logger.info("Starting Celery Beat...")
    celery_beat = subprocess.Popen([
        "celery", "-A", "celery_app", "beat", "--loglevel=info"
    ], env=child_env)

    try:
        # Initialize Redis data.
        logger.info("Initializing Redis data...")
        try:
            init_redis_data()
            logger.info("Redis data initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize Redis data: {str(e)}")
            raise RuntimeError(f"Failed to initialize Redis data: {str(e)}")

        # Trigger the scan_and_init_vms task once, synchronously.
        logger.info("Triggering VM scanning and initialization...")
        try:
            task = celery.send_task('tasks.scan_and_init_vms')
            result = task.get(timeout=60)  # wait up to 60s for the task
            logger.info(f"VM scanning and initialization completed: {result}")
        except Exception as e:
            logger.error(f"Failed to scan and initialize VMs: {str(e)}")
            logger.warning("Continuing master startup despite VM initialization failure")

        # Run the Flask server in the foreground (blocks until shutdown).
        # NOTE(review): debug=True in a long-running deployment exposes the
        # Werkzeug debugger — confirm this is intentional outside development.
        logger.info("Starting Flask server...")
        app.run(host='0.0.0.0', port=MultiNodeConfig.MASTER_PORT, debug=True, use_reloader=False)
    finally:
        # Reap the Celery subprocesses whether Flask exited cleanly or a
        # startup step above raised, so they are not left orphaned.
        for proc in (celery_worker, celery_beat):
            proc.terminate()
        for proc in (celery_worker, celery_beat):
            try:
                proc.wait(timeout=10)
            except subprocess.TimeoutExpired:
                # SIGTERM was ignored; escalate so shutdown cannot hang.
                proc.kill()
                proc.wait()

def start_worker(master_ip):
    """Start a worker node.

    Resolves the master IP (argument, then ``MultiNodeConfig.MASTER_IP``),
    verifies Redis connectivity, waits for the master to publish its IP,
    registers this host, triggers one initial scan task, then runs a Celery
    worker subprocess in the foreground until it exits or the user interrupts.

    Args:
        master_ip: Master node IP address; if falsy, the pre-configured
            ``MultiNodeConfig.MASTER_IP`` is used instead.

    Raises:
        ValueError: if no master IP can be determined.
        RuntimeError: if Redis is unreachable, the master IP never appears
            in Redis, or registration fails after all retries.
    """
    if master_ip:
        MultiNodeConfig.MASTER_IP = master_ip
    elif not MultiNodeConfig.MASTER_IP:
        logger.error("Master IP is required for worker node")
        raise ValueError("Master IP is required (use --master-ip or set MASTER_IP environment variable)")

    load_config(role="worker")

    logger.info("Starting worker node")
    redis_host = MultiNodeConfig.get_redis_host(role="worker")
    redis_client = get_redis_client(role="worker")

    try:
        redis_client.ping()
        logger.info(f"Redis connection successful at {redis_host}:{MultiNodeConfig.REDIS_PORT}")
    except Exception as e:
        logger.error(f"Failed to connect to Redis: {str(e)}")
        raise RuntimeError(f"Failed to connect to Redis: {str(e)}")

    # Wait for the master node to publish its IP in Redis.
    max_retries = 5
    for attempt in range(max_retries):
        master_ip = redis_client.get('master_ip')
        if master_ip:
            logger.info(f"Retrieved master_ip: {master_ip}")
            break
        logger.warning(f"Master IP not found in Redis (attempt {attempt + 1})")
        if attempt == max_retries - 1:
            logger.error("Failed to retrieve master IP from Redis")
            raise RuntimeError("Master IP not found in Redis")
        time.sleep(5)

    # Register this worker node, retrying transient failures.
    max_retries = 3
    for attempt in range(max_retries):
        try:
            task = register_host.apply(args=["worker"])
            result = task.get(timeout=10)
            logger.info(f"Worker node registered successfully: {result}")
            break
        except Exception as e:
            logger.warning(f"Failed to register worker node (attempt {attempt + 1}): {str(e)}")
            if attempt == max_retries - 1:
                logger.error("All attempts to register worker node failed")
                raise RuntimeError(f"Failed to register worker node: {str(e)}")
            time.sleep(2)

    # Trigger one scan task right after startup; failure is logged but does
    # not block the worker from starting.
    logger.info("Triggering initial VM and ISO scan...")
    try:
        task = celery.send_task('tasks.scan_and_init_vms')
        result = task.get(timeout=120)  # generous timeout for the first full scan
        logger.info(f"Initial scan completed: {result}")
    except Exception as e:
        logger.error(f"Failed to perform initial scan: {str(e)}")
        # Do not block startup; the error is recorded above.

    # Start the Celery worker subprocess.
    logger.info("Starting Celery Worker...")
    celery_worker = subprocess.Popen([
        "celery", "-A", "celery_app", "worker",
        "--loglevel=info", "--concurrency=4",
        "--queues=high_priority,default"
    ], env={
        **os.environ,
        "CELERY_BROKER_URL": celery.conf.broker_url,
        "CELERY_RESULT_BACKEND": celery.conf.result_backend
    })

    # Block on the Celery worker; on Ctrl-C, terminate AND reap it so no
    # unreaped child is left behind, escalating to SIGKILL if needed.
    try:
        celery_worker.wait()
    except KeyboardInterrupt:
        logger.info("Shutting down worker node")
        celery_worker.terminate()
        try:
            celery_worker.wait(timeout=10)
        except subprocess.TimeoutExpired:
            celery_worker.kill()
            celery_worker.wait()

def main():
    """Parse command-line arguments and launch this node in the chosen role."""
    parser = argparse.ArgumentParser(
        description="Start a node in the cloud service management system"
    )
    parser.add_argument(
        '--role',
        required=True,
        choices=['master', 'worker'],
        help='Node role: master or worker',
    )
    parser.add_argument(
        '--master-ip',
        default=os.getenv('MASTER_IP'),
        help='Master node IP address (required for worker role)',
    )
    args = parser.parse_args()

    # Any role other than 'master' must be 'worker' (argparse enforces choices).
    if args.role == 'master':
        start_master()
    else:
        start_worker(args.master_ip)


if __name__ == '__main__':
    main()