import subprocess
from time import sleep

from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
from geesibling.adapters.pytorch.pipeline.pipeline.initialize import initialize_megatron_autoparallel
from transformers import LlamaTokenizer
from transformers import LlamaConfig
from functools import wraps
import os
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from geesibling.adapters.pytorch.galvatron.core import  SearchEngine
from geesibling.adapters.pytorch.galvatron.core import initialize_galvatron
from arguments import model_args
from meta_configs import config_from_meta, set_model_config, model_name, model_layer_configs

import torch
from pathlib import Path
# from transformers import LlamaConfig, LlamaForCausalLM


import json

import os
import time

def profile_layer_time(
        hidden_size: int,
        num_attention_heads: int,
        seq_length: int,
        precision: str = "bf16",  # or "fp32"
        save_result: bool = True,
        num_hidden_layers: int = 10,
) -> float:
    """Measure the average forward-pass time of a single Llama decoder layer.

    Builds a small LlamaForCausalLM on CUDA, runs one warm-up forward pass and
    one timed pass, then divides the total forward time by the number of
    layers. Embedding / LM-head cost is amortized across layers, so the result
    is an approximation of the per-layer time.

    Args:
        hidden_size: hidden dimension of the model.
        num_attention_heads: number of attention heads.
        seq_length: input sequence length (must be positive).
        precision: precision type, "bf16" or "fp32".
        save_result: when True, write the result to a JSON file under configs/.
        num_hidden_layers: number of decoder layers in the probe model; the
            measured total time is divided by this value. (Previously
            hard-coded to 10 with a TODO to make it user-configurable.)

    Returns:
        Estimated forward time of one layer, in milliseconds.
    """
    # Validate inputs. NOTE: assert is stripped under `python -O`; kept to
    # preserve the original contract.
    assert precision in ["bf16", "fp32"], "precision must be 'bf16' or 'fp32'"
    assert seq_length > 0, "sequence length must be positive"

    # Build the probe model configuration.
    config = LlamaConfig(
        hidden_size=hidden_size,
        num_attention_heads=num_attention_heads,
        num_hidden_layers=num_hidden_layers,
        intermediate_size=int(hidden_size * 2.6875),  # standard Llama FFN expansion ratio
        torch_dtype=torch.bfloat16 if precision == "bf16" else torch.float32,
    )

    # Temporarily remove the geesibling patches so the vanilla HF model is
    # instantiated, then re-apply them once the model is built.
    from geesibling.adapters.pytorch.megatron_patch.patch_utils import GeesPatchesManager
    GeesPatchesManager.remove_patches()

    from transformers.models.llama.modeling_llama import LlamaForCausalLM
    model = LlamaForCausalLM(config).to("cuda")
    from geesibling.adapters.pytorch.megatron_patch.megatron_adaptor import patch
    patch()
    model.eval()

    # Cast weights to the requested precision.
    if precision == "bf16":
        model = model.to(torch.bfloat16)

    # Random token ids, batch_size=1. Assumes vocab size >= 32000 — TODO
    # confirm against the actual model/tokenizer config.
    input_ids = torch.randint(0, 32000, (1, seq_length), dtype=torch.long).to("cuda")

    # Warm-up pass (kernel compilation / autotuning).
    with torch.no_grad():
        _ = model(input_ids)

    # Timed pass using CUDA events for accurate device-side timing.
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    with torch.no_grad():
        torch.cuda.synchronize()
        start_event.record()
        _ = model(input_ids)  # forward pass
        end_event.record()
        torch.cuda.synchronize()

    layer_time_ms = start_event.elapsed_time(end_event) / num_hidden_layers
    print(f"layer_time_ms: {layer_time_ms}")

    # Persist the result for the search engine to pick up later.
    if save_result:
        configs_dir = Path("configs")
        configs_dir.mkdir(exist_ok=True)

        filename = f"computation_profiling_{precision}_hidden{hidden_size}_head{num_attention_heads}_seqlen{seq_length}.json"
        filepath = configs_dir / filename

        result = {
            "layertype_0": layer_time_ms  # assumes all layer types are identical
        }
        with open(filepath, "w") as f:
            json.dump(result, f, indent=4)
    return layer_time_ms

def seach_hybrid_parallel(args):
    """Run the Galvatron search engine and derive the hybrid-parallel plan.

    Profiles one layer's forward time, feeds the model/cluster description to
    the search engine, and converts the optimal strategy into a
    (pipeline_parallel_size, tensor_parallel_size, micro_batch_size) tuple.
    """
    # Build the model config from the meta preset, profile a single layer
    # (only meaningful on the first GPU), then merge the CLI arguments in.
    model_config = config_from_meta(args.model_size)
    profile_layer_time(args.hidden_size, args.num_attention_heads, args.seq_length, args.mixed_precision)
    model_config = set_model_config(model_config, args, overwrite_args=False)

    base_dir = os.path.dirname(os.path.abspath(__file__))
    print(args)
    print(model_config)

    # Configure and run the parallelism search.
    engine = SearchEngine(args)
    engine.set_search_engine_info(base_dir, model_layer_configs(model_config), model_name(model_config))
    engine.set_model_type('gpt')  # Optional

    # TODO compute param_list from self.model_type -- simulated for now.
    engine.initialize_search_engine()
    # engine.check_cost_model(bsz=48,chunk=1,min_tp=1)
    optimal = engine.parallelism_optimization()
    pipeline_parallel_size = optimal[0]
    tensor_parallel_size = optimal[1]

    # Derive micro_batch_size from the settled global batch size:
    # data-parallel degree = total_gpus / (tp * pp).
    dp_degree = (args.num_gpus_per_node * args.num_nodes) / (tensor_parallel_size * pipeline_parallel_size)
    micro_batch_size = int(args.settle_bsz / dp_degree / args.settle_chunk)
    return pipeline_parallel_size, tensor_parallel_size, micro_batch_size


import torch.distributed as dist
def parallelize(config):
    """Decorator factory: wraps a training entry point so that, before it
    runs, the hybrid-parallel plan (pp / tp / micro-batch size) is searched
    on one rank, broadcast to every rank, and Megatron-style parallel state
    is initialized.

    `config` is expected to expose the parsed arguments as `config.args`
    (with local_rank, epoch, etc.) — TODO confirm against the caller.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Decide the 3D parallel degrees up front.
            # NOTE: torch.distributed is assumed to be initialized already
            # by the surrounding launcher; the explicit init_process_group
            # call that used to live here was removed.

            print(config.args)

            # NOTE(review): this gates on local_rank == 0, so on a
            # multi-node run every node's first GPU performs the expensive
            # search, but only global rank 0's result survives the
            # broadcast below (src=0). Presumably this should check the
            # global rank — verify on multi-node setups.
            if config.args.local_rank == 0:
                pipeline_parallel_size,tensor_parallel_size,micro_batch_size = (
                    seach_hybrid_parallel(config.args))
                parallel_info = torch.tensor(
                    [pipeline_parallel_size, tensor_parallel_size, micro_batch_size], dtype=torch.int).to(config.args.local_rank) # fixed values
            else:
                parallel_info = torch.zeros(3, dtype=torch.int).to(config.args.local_rank)

            # Share the chosen plan with every rank.
            dist.broadcast(parallel_info, src=0)

            config.args.pipeline_model_parallel_size = int(parallel_info[0])
            config.args.tensor_model_parallel_size = int(parallel_info[1])
            config.args.micro_batch_size = int(parallel_info[2])
            
            # Initialize Megatron parallel groups; returns the GPU list.
            gpus_list = initialize_megatron_autoparallel()      # compared to the previous version, initialization content was removed
            config.args.gpus_list = gpus_list

            # Expose the epoch count as an attribute on the wrapper, log the
            # resulting parallel world sizes, then call the wrapped function.
            wrapper.epoch = config.args.epoch
            print(f"tp:{mpu.get_tensor_model_parallel_world_size()}")
            print(f"dp:{mpu.get_data_parallel_world_size()}")
            print(f"pp:{mpu.get_pipeline_model_parallel_world_size()}")
            return func(*args, **kwargs)

        return wrapper
    return decorator




class Profile:
    """Static helpers for hardware profiling.

    Generates shell scripts that benchmark allreduce and p2p bandwidth,
    runs them on the first rank when their result files are missing, and
    broadcasts a (currently hard-coded) strategy tensor to the other ranks.
    """

    @staticmethod
    def generate_script(args):
        """Write profile_allreduce.sh and profile_p2p.sh under
        ../../profile_hardware/scripts/.

        Args:
            args: parsed arguments; must provide num_nodes,
                num_gpus_per_node, master_addr, master_port, node_rank and
                max_pp_deg.
        """
        num_nodes = args.num_nodes
        num_gpus_per_node = args.num_gpus_per_node

        def get_env(args):
            # Emit the launcher environment exports placed at the top of
            # each generated script.
            env = {
                'NUM_NODES': args.num_nodes,
                'NUM_GPUS_PER_NODE': args.num_gpus_per_node,
                'MASTER_ADDR': args.master_addr,
                'MASTER_PORT': args.master_port,
                'NODE_RANK': args.node_rank,
            }
            env_str = ""
            env_str += "\n".join([f"export {k}={v}" for k, v in env.items()]) + "\n"
            return env_str

        world_size = num_nodes * num_gpus_per_node
        env = get_env(args)

        def allreduce_script(allreduce_size, allreduce_consec):
            # One torch.distributed.launch invocation per (size, consec) pair.
            return "python -m torch.distributed.launch --nnodes=$NUM_NODES --nproc_per_node=$NUM_GPUS_PER_NODE --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT --node_rank=$NODE_RANK profile_allreduce.py --global_tp_deg %d --global_tp_consec %d --pp_deg 1 --nproc_per_node=$NUM_GPUS_PER_NODE \n" % (
            allreduce_size, allreduce_consec)

        with open('../../profile_hardware/scripts/profile_allreduce.sh', 'w') as f:
            f.write(env)
            allreduce_size = num_nodes * num_gpus_per_node
            while allreduce_size > 1:
                for allreduce_consec in [1, 0]:
                    # Non-consecutive placement is meaningless at full world size.
                    if world_size == allreduce_size and allreduce_consec == 0:
                        continue
                    f.write("echo \"Running: %s\"\n" % allreduce_script(allreduce_size, allreduce_consec))
                    f.write(allreduce_script(allreduce_size, allreduce_consec))
                    f.write('sleep 20\n')
                # Integer halving: `/= 2` silently turned the size into a
                # float (and, for non-power-of-two world sizes, emitted a
                # spurious size-1 entry via %d truncation).
                allreduce_size //= 2
                f.write('sleep 10\n')

        def p2p_script(pp_deg):
            return "python -m torch.distributed.launch --nnodes=$NUM_NODES --nproc_per_node=$NUM_GPUS_PER_NODE --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT --node_rank=$NODE_RANK profile_p2p.py --global_tp_deg 1 --global_tp_consec 1 --pp_deg %d --nproc_per_node=$NUM_GPUS_PER_NODE \n" % (
                pp_deg)

        with open('../../profile_hardware/scripts/profile_p2p.sh', 'w') as f:
            f.write(env)
            pp_deg = 2
            # TODO larger pp degrees currently trigger a bug, so only
            # pp_deg == 2 is profiled; restore `pp_deg <= world_size` once
            # that is fixed.
            while pp_deg <= 2 and pp_deg <= args.max_pp_deg:
                f.write("echo \"Running: %s\"\n" % p2p_script(pp_deg))
                f.write(p2p_script(pp_deg))
                pp_deg *= 2
                f.write('sleep 10\n')

    @staticmethod
    def run_script_and_wait(script_path):
        """Run `script_path` with bash from ../../profile_hardware and block
        until it finishes.

        NOTE: this method was previously defined twice; the earlier, simpler
        variant was silently shadowed and has been removed.
        """
        cwd = os.getcwd()
        try:
            profile_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), "../../profile_hardware")
            )
            print(f"[Profile] 切换到目录: {profile_dir} 运行 {script_path}")

            # `cwd=` only affects the child process; the parent's working
            # directory is never actually changed.
            proc = subprocess.Popen(["bash", script_path], cwd=profile_dir)
            proc.wait()
        finally:
            # Defensive: restore the recorded working directory.
            os.chdir(cwd)
            print(f"[Profile] 已回到原目录: {cwd}")

    @staticmethod
    def get_cluster():
        """Generate (and, on global rank 0, run) the bandwidth-profiling
        scripts when their result files are missing, then broadcast a
        hard-coded placeholder strategy to all ranks.

        Returns:
            True once the broadcast has completed on this rank.
        """
        args = get_args()

        if args.local_rank != 0:
            # Non-leader ranks just receive the 3-int tensor from rank 0.
            # NOTE(review): received into `hardware_info` but never stored
            # or returned — looks like a placeholder; confirm intent.
            hardware_info = torch.zeros(3, dtype=torch.int).to(args.local_rank)
            dist.broadcast(hardware_info, src=0)

        if args.local_rank == 0:
            if args.rank == 0:
                # TODO generate these scripts under /data together with the
                # matching profile_allreduce.py / profile_p2p.py; kept in
                # the current directory for easier debugging.
                Profile.generate_script(args)

                # Skip profiling when both bandwidth result files exist.
                # NOTE(review): uses args.nnodes here while generate_script
                # reads args.num_nodes — confirm both attributes exist and
                # agree.
                file_path = f'../../profile_hardware/hardware_configs/allreduce_bandwidth_{args.nnodes}nodes_{args.num_gpus_per_node}gpus_per_node.json'
                file_path2 = f'../../profile_hardware/hardware_configs/p2p_bandwidth_{args.nnodes}nodes_{args.num_gpus_per_node}gpus_per_node.json'
                if not os.path.exists(file_path) or not os.path.exists(file_path2):
                    # Run the scripts sequentially, blocking until done.
                    Profile.run_script_and_wait("scripts/profile_allreduce.sh")
                    sleep(10)
                    Profile.run_script_and_wait("scripts/profile_p2p.sh")
                    sleep(10)

            # DEBUG: broadcast a fixed placeholder strategy for now.
            parallel_info = torch.tensor(
                [4, 2, 1], dtype=torch.int).to(
                args.local_rank)
            dist.broadcast(parallel_info, src=0)
        return True


# def DataLoader1(dataset):
#     from geesibling.adapters.pytorch.get_data import get_data_loader,get_data_loader_with_ddp,get_train_dataset,collate_fn
#     from torch.utils.data.distributed import DistributedSampler
#     from torch.utils.data import DataLoader
#     from geesibling.adapters.pytorch.pipeline.megatron import mpu
#     args = get_args()
#     micro_batch_size = args.micro_batch_size
#     tensor_length = args.tensor_length
#
#     print('get tokenizer')
#     tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
#     if tokenizer.pad_token is None:
#         tokenizer.pad_token = tokenizer.eos_token
#
#     train_dataset = get_train_dataset(dataset, tokenizer, tensor_length)
#     if mpu.get_data_parallel_world_size() > 1:
#         train_sampler = DistributedSampler(train_dataset, num_replicas=mpu.get_data_parallel_world_size(),
#                                            rank=mpu.get_data_parallel_rank())
#         dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=False, sampler=train_sampler,
#                                 collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
#     else:
#         dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=True,
#                                 collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
#     return dataloader



#  cfg = LlamaConfig(
#             hidden_size=args.hidden_size if args.hidden_size is not None else 4096,
#             num_hidden_layers=args.num_hidden_layers if args.num_hidden_layers is not None else 8,  # 你的默认值是8
#             num_attention_heads=args.num_attention_heads if args.num_attention_heads is not None else 32,
#             vocab_size=args.vocab_size if args.vocab_size is not None else 32000,
#         )