"""Train an algorithm."""
import argparse
import json
from harl.utils.configs_tools import get_defaults_yaml_args, update_args

import os
import json
import datetime

def _resolve_models_dir(path):
    """Return the ``models`` directory under *path*.

    A result directory either contains ``models`` directly or holds exactly
    one (timestamped) sub-directory that contains it.

    Args:
        path: root of a training-result directory.
    Returns:
        Absolute-or-relative path ending in ``models``.
    Raises:
        AssertionError: if the layout is ambiguous or the directory is missing.
    """
    if "models" in os.listdir(path):
        models_dir = os.path.join(path, "models")
    else:
        sub_dirs = [
            d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))
        ]
        assert len(sub_dirs) == 1, f"expected one sub-directory in {path}: {sub_dirs}"
        models_dir = os.path.join(path, sub_dirs[0], "models")
    assert os.path.exists(models_dir), models_dir
    return models_dir


def _read_eval_summary(runner):
    """Read the latest eval metrics from the runner's ``summary.json``.

    Args:
        runner: a HARL runner whose ``run_dir`` contains ``logs/summary.json``.
    Returns:
        Dict with the three tracked eval metrics, each the most recent value.
    """
    result_json = os.path.join(runner.run_dir, "logs", "summary.json")
    with open(result_json, "r", encoding="utf-8") as file:
        raw = json.load(file)
    # Strip tensorboard-style prefixes from keys and keep only the latest
    # logged value of each series.
    data = {k.split("/")[-1]: v[-1][-1] for k, v in raw.items()}
    return {
        "eval_score_rate": data["eval_score_rate"],
        "eval_average_episode_rewards": data["eval_average_episode_rewards"],
        "eval_average_episode_rewards_additional": data[
            "eval_average_episode_rewards_additional"
        ],
    }


def _save_results(results, timing):
    """Dump the accumulated *results* dict to ``results/<timing>/res.json``."""
    res_path = f"results/{timing}/res.json"
    with open(res_path, "w", encoding="utf-8") as file:
        json.dump(results, file)
    print(f"Results stored in {res_path}")


def main():
    """Parse CLI args, then evaluate every teammate/ego model pairing."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--algo",
        type=str,
        default="happo",
        choices=[
            "happo",
            "hatrpo",
            "haa2c",
            "haddpg",
            "hatd3",
            "hasac",
            "had3qn",
            "maddpg",
            "matd3",
            "mappo",
        ],
        help="Algorithm name. Choose from: happo, hatrpo, haa2c, haddpg, hatd3, hasac, had3qn, maddpg, matd3, mappo.",
    )
    parser.add_argument(
        "--env",
        type=str,
        default="pettingzoo_mpe",
        choices=[
            "smac",
            "mamujoco",
            "pettingzoo_mpe",
            "gym",
            "football",
            "dexhands",
            "smacv2",
            "lag",
        ],
        help="Environment name. Choose from: smac, mamujoco, pettingzoo_mpe, gym, football, dexhands, smacv2, lag.",
    )
    parser.add_argument(
        "--exp_name", type=str, default="installtest", help="Experiment name."
    )
    parser.add_argument(
        "--load_config",
        type=str,
        default="",
        help="If set, load existing experiment config file instead of reading from yaml config file.",
    )

    args, unparsed_args = parser.parse_known_args()

    def process(arg):
        # NOTE(review): eval on command-line values follows the upstream HARL
        # convention, but it executes arbitrary expressions — only run this
        # script with trusted arguments.
        try:
            return eval(arg)
        except Exception:
            return arg

    # Extra "--key value" pairs become overrides for the algo/env configs.
    keys = [k[2:] for k in unparsed_args[0::2]]  # remove -- from argument
    values = [process(v) for v in unparsed_args[1::2]]
    unparsed_dict = {k: v for k, v in zip(keys, values)}
    args = vars(args)  # convert to dict
    if args["load_config"] != "":  # load config from existing config file
        with open(args["load_config"], encoding="utf-8") as file:
            all_config = json.load(file)
        args["algo"] = all_config["main_args"]["algo"]
        args["env"] = all_config["main_args"]["env"]
        algo_args = all_config["algo_args"]
        env_args = all_config["env_args"]
    else:  # load config from corresponding yaml file
        algo_args, env_args = get_defaults_yaml_args(args["algo"], args["env"])
    update_args(unparsed_dict, algo_args, env_args)  # update args from command line

    # start evaluation
    from harl.runners import RUNNER_REGISTRY

    tm_lst = [
        "",  # teammate result path
    ]

    ego_lst = [
        "",  # agent result path
    ]

    # Slot of the teammate agent within each pair (0 or 1); the ego agent
    # occupies the other slot.
    tm_idx = 1
    ego_id = 1 - tm_idx
    if tm_idx == 0:
        pair_lst = [[tm, ego] for tm in tm_lst for ego in ego_lst]
    elif tm_idx == 1:
        pair_lst = [[ego, tm] for tm in tm_lst for ego in ego_lst]
    else:
        raise ValueError(f"tm_idx must be 0 or 1, got {tm_idx}")

    timing = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    results = {}
    for i, pair in enumerate(pair_lst):
        print(f"\n{i+1} / {len(pair_lst)}\n")
        if len(pair) == 1:
            # A single path plays both roles (self-play evaluation).
            model_dir_0 = pair[0]
            model_dir_1 = pair[0]
        elif len(pair) == 2:
            model_dir_0, model_dir_1 = pair
        else:
            raise ValueError(f"unexpected pair length: {pair}")

        model_dir_ = model_dir_0 if tm_idx == 0 else model_dir_1

        # Extract the reward id encoded in paths like ".../human_<id>/...";
        # fall back to 0 when the path does not follow that pattern.
        try:
            env_args["additional_reward_id"] = int(
                model_dir_.split("human_")[1].split("/")[0]
            )
        except (IndexError, ValueError):
            env_args["additional_reward_id"] = 0

        model_dir_0 = _resolve_models_dir(model_dir_0)
        model_dir_1 = _resolve_models_dir(model_dir_1)

        os.makedirs(os.path.join(os.getcwd(), f"results/{timing}"), exist_ok=True)

        runner = RUNNER_REGISTRY[args["algo"]](args, algo_args, env_args)
        print(model_dir_0, model_dir_1)
        runner.restore_one_actor(0, model_dir_0)
        runner.restore_one_actor(1, model_dir_1)

        load_act = True
        if load_act and "multihead" in pair[ego_id]:
            # Evaluate every head of a multi-head ego agent: sibling runs
            # share the path prefix and differ only in the trailing index.
            for act_id_int in range(1, 7):
                act_id = str(act_id_int)
                act_path = "_".join(pair[ego_id].split("_")[:-1] + [act_id])
                act_path = _resolve_models_dir(act_path)
                # Rebuild the runner so each head is evaluated from a
                # freshly restored state.
                runner.close()
                runner = RUNNER_REGISTRY[args["algo"]](args, algo_args, env_args)
                runner.restore_one_actor(0, model_dir_0)
                runner.restore_one_actor(1, model_dir_1)
                runner.restore_one_act(ego_id, act_path)
                runner.logger.init(1)
                runner.logger.episode_init(1)
                runner.eval()
                runner.close()

                name = (
                    f"{model_dir_0}+++{model_dir_1}---{act_path}"
                    if tm_idx == 0
                    else f"{model_dir_0}---{act_path}+++{model_dir_1}"
                )
                results[name] = _read_eval_summary(runner)
            _save_results(results, timing)

        else:
            runner.logger.init(1)
            runner.logger.episode_init(1)
            runner.eval()
            runner.close()

            results[f"{model_dir_0}+++{model_dir_1}"] = _read_eval_summary(runner)
            _save_results(results, timing)
    
if __name__ == "__main__":
    main()
