import os

from onpolicy.scripts.render.render_mpe import main
from train import num_agents, num_landmarks

# --- Environment selection ---
env_name = "MPE"
wandb_env_name = "MPE_1118"
scenario_name = "trap_env_move"

# Electric-field parameters used for THIS render run (training-time values
# are read back from the saved config.yaml below).
ele_radius = 0.5
ele_time = 10

# --- Algorithm / run settings ---
algorithm_name = "mappo"  # one of: "mappo", "ippo"
experiment_name = "mat_radius"
n_rollout_threads = 1
seed = 2
episode_length = 100
render_episodes = 5

# Directory of the saved model: the wandb files folder for this
# scenario / algorithm / experiment combination (Windows-style path).
model_dir = (
    "E:\\Code_file\\Python\\on-policy-main\\onpolicy\\scripts\\results\\MPE\\"
    f"{scenario_name}\\{algorithm_name}\\{experiment_name}\\wandb\\0\\files\\"
)

import yaml

# Read the config.yaml saved alongside the model (in model_dir) so the
# render run knows which ele_time / ele_radius the policy was trained with.
config_path = os.path.join(model_dir, "config.yaml")
with open(config_path, "r") as f:
    config = yaml.safe_load(f)

# wandb stores each hyperparameter as a {"value": ...} mapping. Fall back to
# 0 / 0.0 when a key is missing. The original code did
# `config.get("ele_time", 0)["value"]`, which subscripts the int default and
# raises TypeError whenever the key is absent — the advertised default never
# actually applied.
train_ele_time = config.get("ele_time", {}).get("value", 0)  # default 0
train_ele_radius = config.get("ele_radius", {}).get("value", 0.0)  # default 0.0
if __name__ == "__main__":
    # (flag, value) pairs forwarded to the renderer's CLI parser; every value
    # is stringified below, matching the f-string formatting of argparse input.
    cli_options = [
        ("--use_wandb", "False"),
        ("--env_name", env_name),
        ("--save_gifs", "True"),
        ("--algorithm_name", algorithm_name),
        ("--experiment_name", experiment_name),
        ("--scenario_name", scenario_name),
        ("--num_agents", num_agents),
        ("--num_landmarks", num_landmarks),
        ("--n_rollout_threads", n_rollout_threads),
        ("--seed", seed),
        ("--episode_length", episode_length),
        ("--use_render", "True"),
        ("--render_episodes", render_episodes),
        ("--model_dir", model_dir),
        ("--ele_time", ele_time),
        ("--train_ele_time", train_ele_time),
        ("--train_ele_radius", train_ele_radius),
        ("--ele_radius", ele_radius),
    ]

    # Flatten into the flag/value interleaved argv list main() expects.
    argv = []
    for flag, value in cli_options:
        argv.append(flag)
        argv.append(str(value))

    main(argv)
