import os

from onpolicy.scripts.train.train_mpe import main

# NOTE(security): a real W&B API key was hardcoded here, so it is already
# exposed in version control and should be revoked/rotated. Prefer supplying
# it via the environment (`export WANDB_API_KEY=...`); `setdefault` keeps the
# old behaviour as a fallback while respecting an externally provided key.
os.environ.setdefault(
    "WANDB_API_KEY", "0d6d05d9f00ba6c219c73e794931cc157069318d"
)

# --- Environment / scenario configuration ---------------------------------
env_name = "MPE"                 # environment family passed to the trainer
wandb_env_name = "MAT"           # grouping name used for W&B logging
scenario_name = "trap_env_move"  # MPE scenario to train on

num_landmarks = 1
num_agents = 4
ele_radius = 20   # original note said "0.2" — units/scale unclear, TODO confirm
ele_time = 200    # presumably a scenario time budget — verify against env code

# --- Training hyperparameters ----------------------------------------------
algorithm_name = "mappo"  # one of: "mappo", "ippo", "mat"
experiment_name = "add_net"
n_rollout_threads = 1     # parallel rollout workers (32 for full-scale runs)
seed = 2
episode_length = 100
num_env_steps = 3000000   # total environment steps (20000000 for full runs)
ppo_epoch = 10
lr = 7e-4
critic_lr = 7e-4
# Optional: resume from a saved checkpoint by uncommenting this and the
# "--model_dir" entry in the argv list below.
# model_dir = (
#     "E:\\Code_file\\Python\\on-policy-main\\onpolicy\\scripts\\results\\MPE\\"
#     + scenario_name
#     + "\\rmappo\\check\\wandb\\"
#     + "run-20241119_160626-tlw7ljxz"
#     + "\\files"
# )
if __name__ == "__main__":
    # Build the CLI-style argument vector for the trainer from (flag, value)
    # pairs. Each value is stringified exactly once, replacing the original's
    # redundant f-string wrapper around every already-defined variable; the
    # resulting argv strings are identical to before.
    options = [
        ("--env_name", env_name),
        ("--wandb_env_name", wandb_env_name),
        ("--algorithm_name", algorithm_name),
        ("--experiment_name", experiment_name),
        ("--scenario_name", scenario_name),
        ("--num_agents", num_agents),
        ("--num_landmarks", num_landmarks),
        ("--n_rollout_threads", n_rollout_threads),
        ("--seed", seed),
        ("--episode_length", episode_length),
        ("--num_env_steps", num_env_steps),
        ("--ppo_epoch", ppo_epoch),
        ("--lr", lr),
        ("--critic_lr", critic_lr),
        ("--wandb_name", scenario_name),
        ("--ele_time", ele_time),
        ("--ele_radius", ele_radius),
        ("--train_ele_time", ele_time),
        ("--train_ele_radius", ele_radius),
        # To disable W&B logging, add: ("--use_wandb", "False")
        # To resume from a checkpoint, add: ("--model_dir", model_dir)
    ]
    # Flatten pairs into ["--flag", "value", ...] as the trainer expects.
    argv = [str(token) for pair in options for token in pair]
    main(argv)
