# from asyncio import sleep

from geesibling.adapters.pytorch.galvatron.core import  SearchEngine
from geesibling.adapters.pytorch.galvatron.core import initialize_galvatron
from arguments import model_args
from meta_configs import config_from_meta, set_model_config, model_name, model_layer_configs

import torch
from pathlib import Path
from transformers import LlamaConfig, LlamaForCausalLM
import json

import os
import time
def profile_layer_time(
        hidden_size: int,
        num_attention_heads: int,
        seq_length: int,
        precision: str = "bf16",  # or "fp32"
        save_result: bool = True,
        num_hidden_layers: int = 10,
) -> float:
    """Measure the average per-layer forward-pass time of a Llama model.

    Builds a small ``LlamaForCausalLM`` with ``num_hidden_layers`` layers,
    runs one warmed-up forward pass on CUDA, and divides the total elapsed
    time by the layer count to estimate single-layer latency.

    Args:
        hidden_size: hidden dimension of the model.
        num_attention_heads: number of attention heads.
        seq_length: input sequence length (must be positive).
        precision: precision type, "bf16" or "fp32".
        save_result: when True, write the result to a JSON file under ./configs.
        num_hidden_layers: layer count used for profiling. Previously a
            hard-coded constant; kept at 10 as the default for compatibility.

    Returns:
        Forward-pass time of a single layer, in milliseconds.

    Raises:
        ValueError: if ``precision`` is invalid or ``seq_length`` is not positive.
    """
    # Validate inputs with real exceptions (assert is stripped under `python -O`).
    if precision not in ("bf16", "fp32"):
        raise ValueError("precision must be 'bf16' or 'fp32'")
    if seq_length <= 0:
        raise ValueError("sequence length must be positive")

    dtype = torch.bfloat16 if precision == "bf16" else torch.float32

    # Build the model config; 2.6875 is Llama's standard MLP expansion ratio.
    config = LlamaConfig(
        hidden_size=hidden_size,
        num_attention_heads=num_attention_heads,
        num_hidden_layers=num_hidden_layers,
        intermediate_size=int(hidden_size * 2.6875),
        torch_dtype=dtype,
    )

    # Instantiate, cast, and move to GPU in one step (avoids first uploading
    # an fp32 copy to CUDA and only then converting it to bf16).
    model = LlamaForCausalLM(config).to(dtype=dtype, device="cuda")
    model.eval()

    # Random token ids, batch_size=1; 32000 is the default Llama vocab size.
    input_ids = torch.randint(0, 32000, (1, seq_length), dtype=torch.long, device="cuda")

    # Warm-up pass so kernel compilation/caching doesn't pollute the timing.
    with torch.no_grad():
        _ = model(input_ids)

    # Time one forward pass with CUDA events for accurate on-device timing.
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    with torch.no_grad():
        torch.cuda.synchronize()
        start_event.record()
        _ = model(input_ids)  # forward pass
        end_event.record()
        torch.cuda.synchronize()

    layer_time_ms = start_event.elapsed_time(end_event) / num_hidden_layers
    print(f"layer_time_ms: {layer_time_ms}")

    # Persist the per-layer time in the format the search engine reads.
    if save_result:
        configs_dir = Path("configs")
        configs_dir.mkdir(exist_ok=True)

        filename = f"computation_profiling_{precision}_hidden{hidden_size}_head{num_attention_heads}_seqlen{seq_length}.json"
        filepath = configs_dir / filename

        result = {
            "layertype_0": layer_time_ms  # assumes all layer types are identical
        }
        with open(filepath, "w") as f:
            json.dump(result, f, indent=4)
    return layer_time_ms

if __name__ == '__main__':
    # Parse CLI arguments in 'search' mode (adds search-engine options).
    args = initialize_galvatron(model_args, mode='search')
    print("***"*5)
    config = config_from_meta(args.model_size)

    # TODO: run the compute profile here and write its results into configs
    profile_layer_time(args.hidden_size,args.num_attention_heads,args.seq_length,args.mixed_precision)

    config = set_model_config(config, args, overwrite_args=False)
    path = os.path.dirname(os.path.abspath(__file__))
    print(args)
    print(config)
    
    # Build the search engine and feed it the model layout before searching.
    search_engine = SearchEngine(args)
    search_engine.set_search_engine_info(path, model_layer_configs(config), model_name(config))
    # search_engine.set_microbatch_func(microbatch_size=4, max_chunk=8) # Optional
    search_engine.set_model_type('gpt') # Optional

    # TODO: compute param_list based on self.model_type (simulated)
    
    search_engine.initialize_search_engine()
    # search_engine.check_cost_model(bsz=48,chunk=1,min_tp=1)
    optimal_strategy = search_engine.parallelism_optimization()
    # print(optimal_strategy)
    # NOTE(review): assumes the strategy is ordered (pp_size, tp_size, ...) — confirm
    pipeline_parallel_size = optimal_strategy[0]
    tensor_parallel_size = optimal_strategy[1]

    # Export the chosen configuration via environment variables for gees.sh.
    # print(args)

    # GPU config
    os.environ["NGPUS_PER_NODE"] = str(getattr(args, 'num_gpus_per_node', 8))
    os.environ["NNODES"] = str(getattr(args, 'num_nodes', 1))
    # TODO: node rank differs across machines in multi-node runs, so it is not
    # accepted as a shell-script parameter; the user must manually
    # `export NUM_GPUS_PER_NODE=1` (or 2, 3, ...) on each node.
    # os.environ["NODE_RANK"] = str(getattr(args, '', 0))

    # parallel config
    os.environ['TENSOR_PARALLEL_SIZE'] = str(tensor_parallel_size)
    os.environ['PIPELINE_PARALLEL_SIZE'] = str(pipeline_parallel_size)


    # NOTE(review): micro_batch_size is computed here but never used — the
    # MICRO_BATCH_SIZE env var below falls back to a getattr default instead.
    # Confirm which value the launch script is actually meant to receive.
    micro_batch_size = (args.settle_bsz /
                        ((args.num_gpus_per_node * args.num_nodes) / (tensor_parallel_size*pipeline_parallel_size))  /
                        args.settle_chunk
                        )
    os.environ['MICRO_BATCH_SIZE'] = str(getattr(args, 'micro_batch_size', 2))
    os.environ['MICRO_BATCH'] = str(getattr(args, 'settle_chunk', 8))

    # model related
    os.environ['TENSOR_LENGTH'] = str(getattr(args, 'seq_length', 256))
    os.environ['HIDDEN_SIZE'] = str(getattr(args, 'hidden_size', 4096))
    os.environ['NUM_HIDDEN_LAYERS'] = str(getattr(args, 'num_hidden_layers', 32))
    os.environ['NUM_ATTENTION_HEADS'] = str(getattr(args, 'num_attention_heads', 32))
    os.environ['VOCAB_SIZE'] = str(getattr(args, 'vocab_size', 32000))


    # NOTE(review): purpose of this delay is not evident from the code
    # (possibly letting resources settle before launch) — confirm it is needed.
    time.sleep(15)
    # Launch the actual run with the environment configured above.
    os.system("bash ./gees.sh")


