# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import os
import json
import subprocess
import multiprocessing
from enum import Enum
from pathlib import Path

import torch

from mindspeed_rl.utils.loggers import Loggers
from mindspeed_rl.trainer.auto_parallel.system_config import SystemConfig


# Module-level logger used by all launch-orchestration messages below.
logger = Loggers("launch")


class TaskType(Enum):
    """Task identifiers broadcast between nodes to coordinate training phases.

    The string values are what ``Launch.broadcast`` actually sends via
    ``torch.distributed.broadcast_object_list``.
    """
    GRPO = 'grpo'
    GENERATION = 'generate_sequences'
    REFERENCE = 'reference'
    UPDATE = 'update'
    ELSE = 'else'
    EXIT = 'exit'


def wait_for_connect(nnodes, master_addr, master_port, timeout=None, poll_interval=5):
    """Block until ``nnodes`` Ray nodes report alive in the cluster.

    Runs in a child process (see ``Launch.launch_on_head``) because
    ``ray.init`` may only be called once per process.

    Args:
        nnodes: expected number of alive nodes in the cluster.
        master_addr: address of the Ray head node.
        master_port: port of the Ray head node.
        timeout: optional maximum seconds to wait. ``None`` (the default)
            waits forever, matching the original behavior.
        poll_interval: seconds to sleep between cluster polls.

    Raises:
        TimeoutError: if ``timeout`` elapses before all nodes are alive.
    """
    import ray
    import time
    ray.init(address="{}:{}".format(master_addr, master_port))

    deadline = None if timeout is None else time.monotonic() + timeout
    while True:
        alive = sum(1 for node in ray.nodes() if node['alive'])
        if alive == nnodes:
            return
        if deadline is not None and time.monotonic() >= deadline:
            raise TimeoutError(
                "expected {} alive Ray nodes, saw {}".format(nnodes, alive))
        time.sleep(poll_interval)


class Launch:

    @classmethod
    def broadcast(cls, task_type=None):
        if SystemConfig.node_rank == 0:
            obj_list = [task_type.value]
        else:
            obj_list = [None]
        torch.distributed.broadcast_object_list(obj_list, src=0)

    @classmethod
    def execute_cmd(cls, commands, blocking=True):
        try:
            process = subprocess.Popen(commands, shell=False, stdout=None, stderr=None)
            if blocking:
                process.wait()
        except BaseException as e:
            logger.error(e)

    @classmethod
    def stop_ray_instance(cls):
        commands = ["ray", "stop", "--force"]
        cls.execute_cmd(commands)

    @classmethod
    def launch_on_head(cls, config_path, config_name):
        cls.stop_ray_instance()

        resources = {'NPU': SystemConfig.nproc_per_node}
        cmd = [
            "ray", "start", "--head", 
            "--port={}".format(SystemConfig.master_port + 1),
            "--dashboard-port={}".format(SystemConfig.master_port + 2),
            "--node-ip-address={}".format(SystemConfig.current_addr),
            "--resources={}".format(json.dumps(resources))
        ]
        cls.execute_cmd(cmd)

        process = multiprocessing.Process(
            target=wait_for_connect, 
            args=(SystemConfig.nnodes, SystemConfig.master_addr, SystemConfig.master_port + 1),
        )
        process.start()
        process.join()

        cmd = [
            "python", "cli/train_grpo.py",
            "--config-dir", os.path.abspath(os.path.join(config_path, os.pardir)),
            "--config-name", config_name
        ]
        cls.execute_cmd(cmd)

    @classmethod
    def launch_on_slave(cls):
        import time
        cls.stop_ray_instance()

        resources = {'NPU': SystemConfig.nproc_per_node}
        while True:
            cmd = [
                "ray", "start",
                "--address={}:{}".format(SystemConfig.master_addr, SystemConfig.master_port + 1),
                "--resources={}".format(json.dumps(resources)),
                "--node-ip-address={}".format(SystemConfig.current_addr)
            ]
            cls.execute_cmd(cmd)

            cmd = ["ray", "status"]
            process = cls.execute_cmd(cmd)
            if process.returncode == 0:
                logger.info("Successfully connected to the Ray cluster")
                break
            else:
                logger.warning("Failed to connect to the Ray cluster. Retrying in 5 seconds")
                time.sleep(5)
                
    @classmethod
    def launch(cls, config: dict, task_type: TaskType):
        config_name = 'profiler.yaml'
        config_file_path = Path(SystemConfig.rl_config.auto_parallel.work_dir + os.sep + config_name)
        try:
            if config_file_path.exists():
                config_file_path.unlink()
                
            from omegaconf import OmegaConf
            OmegaConf.save(config, config_file_path)
        except FileNotFoundError:
            logger.error(f"path is not exist, config_file_path: {config_file_path}")
            return
        cls.broadcast(task_type)
        cls.launch_on_head(config_file_path, config_name)