# Copyright (c) 2025, HUAWEI CORPORATION. All rights reserved.
import os
import datetime

import torch

from mindspeed_rl.config_cls.megatron_config import MegatronConfig
from mindspeed_rl.config_cls.generate_config import GenerateConfig
from mindspeed_rl.config_cls.rl_config import RLConfig


class SystemConfig:
    """Process-wide (class-level) configuration holder for a distributed RL run.

    All state lives on the class itself: call :meth:`init` exactly once at
    startup to populate it from environment variables and the parsed training
    config, then read the attributes anywhere.
    """

    # Distributed-launch topology, read from environment variables in init().
    master_addr: str
    master_port: int
    current_addr: str
    nnodes: int
    node_rank: int
    nproc_per_node: int

    # Parsed sub-configurations. Annotations are quoted (lazy) so creating
    # this class never requires resolving the config types at import time —
    # behaviorally equivalent for callers, friendlier for tooling/tests.
    actor_config: 'MegatronConfig'
    ref_config: 'MegatronConfig'
    reward_config: 'MegatronConfig'
    generate_config: 'GenerateConfig'
    rl_config: 'RLConfig'

    # Output paths under rl_config.auto_parallel.work_dir. The *_fmt strings
    # keep a literal '{}' placeholder to be filled via .format() later.
    generate_profile_path_fmt: str
    actor_profile_path_fmt: str
    ref_profile_path_fmt: str
    transfer_dock_path: str
    model_profile_path: str

    unit_gb: int            # bytes per GiB
    p2p_band_width: float   # assumed point-to-point bandwidth in GB/s — TODO confirm units

    @classmethod
    def init(cls, config, parse_training_config_callable):
        """Populate all class attributes.

        Args:
            config: Raw configuration object forwarded to the parser callable.
            parse_training_config_callable: Callable returning a dict whose
                values, **in insertion order**, are (actor_config, ref_config,
                reward_config, rl_config, generate_config, _, _).

        Raises:
            AssertionError: In multi-node mode, when MASTER_ADDR,
                CURRENT_ADDR, or NODE_RANK is missing from the environment.
        """
        cls.nnodes = int(os.getenv('NNODES', '1'))
        cls.master_port = int(os.getenv('MASTER_PORT', '6000'))
        cls.nproc_per_node = int(os.getenv('NPUS_PER_NODE', '8'))
        if cls.nnodes <= 1:
            # Single-node run: everything is local, no env vars required.
            cls.master_addr = 'localhost'
            cls.current_addr = 'localhost'
            cls.node_rank = 0
        else:
            missing = [key for key in ('MASTER_ADDR', 'CURRENT_ADDR', 'NODE_RANK')
                       if key not in os.environ]
            if missing:
                # AssertionError is kept (not ValueError) for backward
                # compatibility with existing callers.
                raise AssertionError(
                    "environment variable(s) not found: {}".format(', '.join(missing)))
            cls.master_addr = os.getenv('MASTER_ADDR', '')
            cls.current_addr = os.getenv('CURRENT_ADDR', '')
            cls.node_rank = int(os.getenv('NODE_RANK', '0'))

        # NOTE: relies on the parser returning a dict with a fixed insertion
        # order; the trailing two values are intentionally discarded.
        actor_config, ref_config, reward_config, rl_config, generate_config, _, _ = \
            parse_training_config_callable(config).values()
        cls.actor_config = actor_config
        cls.ref_config = ref_config
        cls.reward_config = reward_config
        cls.rl_config = rl_config
        cls.generate_config = generate_config

        # Artifact paths rooted at the auto-parallel working directory.
        # '{{}}' in the f-strings emits a literal '{}' placeholder.
        cls.generate_profile_path_fmt = f"{rl_config.auto_parallel.work_dir}/generation_{{}}_prof.json"
        cls.ref_profile_path_fmt = f"{rl_config.auto_parallel.work_dir}/ref_{{}}_prof.json"
        cls.actor_profile_path_fmt = f"{rl_config.auto_parallel.work_dir}/actor_{{}}_prof.json"
        cls.transfer_dock_path = f"{rl_config.auto_parallel.work_dir}/transfer_dock.pth"
        cls.model_profile_path = f"{rl_config.auto_parallel.work_dir}/model_info.json"

        cls.unit_gb = 1024 ** 3
        cls.p2p_band_width = 25

    @classmethod
    def init_global_process_group(cls):
        """Initialize a GLOO process group across all nodes (one rank per node).

        Uses TCP rendezvous at master_addr:master_port with a 1-hour timeout;
        must be called after :meth:`init`.
        """
        init_method = 'tcp://{}:{}'.format(cls.master_addr, cls.master_port)
        torch.distributed.init_process_group(
            backend=torch.distributed.Backend.GLOO,
            init_method=init_method,
            rank=cls.node_rank,
            world_size=cls.nnodes,
            timeout=datetime.timedelta(seconds=3600)
        )
