from galvatron.core import  SearchEngine
from galvatron.core import initialize_galvatron
from arguments import model_args
from meta_configs import config_from_meta, set_model_config, model_name, model_layer_configs

import torch
from pathlib import Path
from transformers import LlamaConfig, LlamaForCausalLM
import json

import os
if __name__ == '__main__':
    # Entry point: parse CLI args in 'search' mode and load the meta model config
    # for the requested model size.
    cli_args = initialize_galvatron(model_args, mode='search')
    print("***" * 5)

    model_config = config_from_meta(cli_args.model_size)

    # TODO: add a compute profiling step here and fill the measured per-layer
    # compute times into the configs.
    # profile_layer_time(cli_args.hidden_size, cli_args.num_attention_heads, cli_args.seq_length, cli_args.mixed_precision)

    # Merge CLI overrides into the model config (without overwriting the args).
    model_config = set_model_config(model_config, cli_args, overwrite_args=False)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    print(cli_args)
    print(model_config)

    # Build the search engine and point it at this script's directory, the
    # per-layer configs, and the resolved model name.
    engine = SearchEngine(cli_args)
    engine.set_search_engine_info(script_dir, model_layer_configs(model_config), model_name(model_config))
    # engine.set_microbatch_func(microbatch_size=4, max_chunk=8) # Optional
    engine.set_model_type('gpt') # Optional

    # Run the parallelism strategy search.
    engine.initialize_search_engine()
    # engine.check_cost_model(bsz=16,chunk=1,min_tp=1)
    engine.parallelism_optimization()