import os
from typing import Optional


class ScriptGenerator:
    """Generates bash scripts that profile allreduce and p2p communication.

    The generated scripts launch ``profile_allreduce.py`` / ``profile_p2p.py``
    via ``torch.distributed.launch`` for a range of group sizes.
    """

    def __init__(self, path: str = ".", max_pp_deg: int = 8):
        """
        Initialize the script generator.

        Args:
            path: Output directory; scripts are written under ``<path>/scripts``.
            max_pp_deg: Maximum pipeline-parallel degree to profile.
        """
        self.path = path
        # Lightweight stand-in for an argparse namespace; only .max_pp_deg is read.
        self.args = type('Args', (), {'max_pp_deg': max_pp_deg})()

    def get_env(self) -> str:
        """Return the bash preamble exporting simulated cluster environment variables."""
        return """#!/bin/bash
# 模拟环境变量
export NUM_NODES=2
export NUM_GPUS_PER_NODE=8
export MASTER_ADDR=127.0.0.1
export MASTER_PORT=29500
export NODE_RANK=0\n\n"""

    def generate_script(self, num_nodes: int, num_gpus_per_node: int) -> None:
        """Generate test scripts for allreduce and p2p communication.

        Writes ``profile_allreduce.sh`` and ``profile_p2p.sh`` into
        ``<self.path>/scripts`` (created if missing).

        Args:
            num_nodes: Number of nodes to use.
            num_gpus_per_node: Number of GPUs per node.
        """
        world_size = num_nodes * num_gpus_per_node
        env = self.get_env()

        # Ensure the output directory exists before writing either script.
        config_dir = os.path.join(self.path, "scripts")
        os.makedirs(config_dir, exist_ok=True)

        print("Generating allreduce test script...")
        allreduce_path = self._write_allreduce_script(config_dir, env, world_size)
        print(f"Allreduce test script generated at: {allreduce_path}")

        print("Generating p2p test script...")
        p2p_path = self._write_p2p_script(config_dir, env, world_size)
        print(f"P2P test script generated at: {p2p_path}")

    def _write_allreduce_script(self, config_dir: str, env: str, world_size: int) -> str:
        """Write profile_allreduce.sh covering power-of-two group sizes; return its path."""

        def allreduce_cmd(allreduce_size: int, allreduce_consec: int) -> str:
            # NOTE(review): --nproc_per_node appears twice — once for the launcher
            # and once as an argument to profile_allreduce.py itself; presumably
            # the profile script consumes its own copy. Confirm against that script.
            return (
                "python -m torch.distributed.launch "
                "--nnodes=$NUM_NODES --nproc_per_node=$NUM_GPUS_PER_NODE "
                "--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT "
                "--node_rank=$NODE_RANK profile_allreduce.py "
                f"--global_tp_deg {allreduce_size} --global_tp_consec {allreduce_consec} "
                "--pp_deg 1 --nproc_per_node=$NUM_GPUS_PER_NODE \n"
            )

        allreduce_path = os.path.join(config_dir, "profile_allreduce.sh")
        with open(allreduce_path, "w") as f:
            f.write(env)
            allreduce_size = world_size
            while allreduce_size > 1:
                for allreduce_consec in [1, 0]:
                    # A group spanning the full world has only one layout, so
                    # the non-consecutive variant would be redundant — skip it.
                    if world_size == allreduce_size and allreduce_consec == 0:
                        continue
                    script = allreduce_cmd(allreduce_size, allreduce_consec)
                    f.write(f'echo "Running: {script}"\n')
                    f.write(script)
                allreduce_size //= 2  # halve the group size (integer division)
                f.write("sleep 1\n")
        return allreduce_path

    def _write_p2p_script(self, config_dir: str, env: str, world_size: int) -> str:
        """Write profile_p2p.sh covering pipeline degrees 2..min(world, max_pp_deg); return its path."""

        def p2p_cmd(pp_deg: int) -> str:
            return (
                "python -m torch.distributed.launch "
                "--nnodes=$NUM_NODES --nproc_per_node=$NUM_GPUS_PER_NODE "
                "--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT "
                "--node_rank=$NODE_RANK profile_p2p.py "
                f"--global_tp_deg 1 --global_tp_consec 1 --pp_deg {pp_deg} "
                "--nproc_per_node=$NUM_GPUS_PER_NODE \n"
            )

        p2p_path = os.path.join(config_dir, "profile_p2p.sh")
        with open(p2p_path, "w") as f:
            f.write(env)
            pp_deg = 2
            # Double the pipeline depth until it exceeds either the world size
            # or the configured maximum.
            while pp_deg <= world_size and pp_deg <= self.args.max_pp_deg:
                script = p2p_cmd(pp_deg)
                f.write(f'echo "Running: {script}"\n')
                f.write(script)
                pp_deg *= 2
                f.write("sleep 1\n")
        return p2p_path


def validate_script_content(file_path: str):
    """Dump a generated script to stdout for manual inspection."""
    print(f"\nValidating {file_path}:")
    with open(file_path, 'r') as handle:
        content = handle.read()
    print(content)


if __name__ == "__main__":
    # Build a generator that writes into ./scripts (default path ".").
    generator = ScriptGenerator()

    # Exercise script generation: 1 node with 8 GPUs.
    # (The banner previously claimed "2 nodes with 2 GPUs each", which did not
    # match the actual call below.)
    print("=" * 50)
    print("Testing script generation for 1 node with 8 GPUs")
    print("=" * 50)
    generator.generate_script(num_nodes=1, num_gpus_per_node=8)

    # Verify the generated files by printing their contents; derive the paths
    # from the generator's configured output directory instead of hard-coding
    # two inconsistent relative spellings.
    script_dir = os.path.join(generator.path, "scripts")
    validate_script_content(os.path.join(script_dir, "profile_allreduce.sh"))
    validate_script_content(os.path.join(script_dir, "profile_p2p.sh"))

    print("\nScript generation test completed successfully!")