"""Train an algorithm."""
import argparse
import json
from harl.utils.configs_tools import get_defaults_yaml_args, update_args

import os
import json
import pickle
import matplotlib.pyplot as plt
import numpy as np
import shutil

def _plot_trajectory(pos_lst, label):
    """Plot one 2D trajectory on the current figure.

    Drops the final frame (matching the original per-entity slicing) and
    flips the y-axis so the pitch is drawn in the conventional orientation.
    """
    xs = [coord[0] for coord in pos_lst[:-1]]
    ys = [-coord[1] for coord in pos_lst[:-1]]
    plt.plot(xs, ys, marker='o', linestyle='-', label=label)


def handle_replay(path):
    """Summarize a rendered replay directory and plot its trajectories.

    Reads every per-frame observation pickle under ``<path>/obs`` (files
    named ``<frame>.pkl``), saves a trajectory plot to ``<path>/traj.png``,
    and returns a JSON-serializable summary.

    Parameters
    ----------
    path : str
        Replay directory containing an ``obs`` subdirectory.

    Returns
    -------
    dict
        ``'score'`` (bool, whether the final frame's left score equals 1),
        ``'pass_history'`` (taken from the final frame), and
        ``'score_info'`` (final frame's score_info with ndarrays converted
        to plain lists).
    """
    ball_pos_lst = []
    left_player1_pos_lst = []
    left_player2_pos_lst = []
    right_player1_pos_lst = []

    obs_path = os.path.join(path, 'obs')
    # Filenames are '<int>.pkl'; sort numerically (not lexically) so the
    # frames are processed in temporal order.
    frame_ids = sorted(int(name.split('.')[0]) for name in os.listdir(obs_path))
    if not frame_ids:
        # The original code crashed later with an opaque NameError when the
        # obs directory was empty; fail early with a clear message instead.
        raise FileNotFoundError(f"no observation pickles found in {obs_path}")

    for frame_id in frame_ids:
        with open(os.path.join(obs_path, f'{frame_id}.pkl'), "rb") as f:
            loaded_dict = pickle.load(f)
        # NOTE(review): indices 1/2 on left_team and 0 on right_team are
        # presumably the controlled players — confirm against the env config.
        ball_pos_lst.append(loaded_dict['ball'])
        left_player1_pos_lst.append(loaded_dict['left_team'][1])
        left_player2_pos_lst.append(loaded_dict['left_team'][2])
        right_player1_pos_lst.append(loaded_dict['right_team'][0])

    info = {}
    # Only the final frame's score matters (the original overwrote 'score'
    # every iteration). Cast to a plain bool so json.dump accepts it even
    # when 'score' is a numpy array.
    info['score'] = bool(loaded_dict['score'][0] == 1)

    # Plot the 2D trajectories of the ball and tracked players.
    plt.figure(figsize=(8, 6))
    for pos_lst, label in (
        (ball_pos_lst, "Ball"),
        (left_player1_pos_lst, "Turing"),
        (left_player2_pos_lst, "Johnson"),
        (right_player1_pos_lst, "Meitner"),
    ):
        _plot_trajectory(pos_lst, label)
    plt.legend()
    plt.grid(True)
    plt.savefig(os.path.join(path, 'traj.png'))
    plt.close()

    info['pass_history'] = loaded_dict['pass_history']
    # Convert ndarray values to lists so the summary is JSON-serializable.
    info['score_info'] = {
        k: v.tolist() if isinstance(v, np.ndarray) else v
        for k, v in loaded_dict['score_info'].items()
    }

    return info

def main():
    """Evaluate a pair of pretrained HARL actors and dump metrics.

    Parses CLI arguments, loads the algorithm/environment configuration
    (from a saved experiment config or the default yaml), runs one
    evaluation pass, then — if rendering was requested — re-renders
    episodes (up to 5 seeds) until one scores, summarizing the replay via
    :func:`handle_replay`. Results are written to
    ``results/<exp_name>/res.json``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--algo",
        type=str,
        default="happo",
        choices=[
            "happo",
            "hatrpo",
            "haa2c",
            "haddpg",
            "hatd3",
            "hasac",
            "had3qn",
            "maddpg",
            "matd3",
            "mappo",
        ],
        help="Algorithm name. Choose from: happo, hatrpo, haa2c, haddpg, hatd3, hasac, had3qn, maddpg, matd3, mappo.",
    )
    parser.add_argument(
        "--env",
        type=str,
        default="pettingzoo_mpe",
        choices=[
            "smac",
            "mamujoco",
            "pettingzoo_mpe",
            "gym",
            "football",
            "dexhands",
            "smacv2",
            "lag",
        ],
        help="Environment name. Choose from: smac, mamujoco, pettingzoo_mpe, gym, football, dexhands, smacv2, lag.",
    )
    parser.add_argument(
        "--exp_name", type=str, default="installtest", help="Experiment name."
    )
    parser.add_argument(
        "--load_config",
        type=str,
        default="",
        help="If set, load existing experiment config file instead of reading from yaml config file.",
    )

    args, unparsed_args = parser.parse_known_args()

    def process(arg):
        """Best-effort conversion of a CLI string to a Python value."""
        # NOTE(review): eval on raw CLI strings is unsafe for untrusted
        # input; kept for backward compatibility since it accepts
        # expressions such as `True`, `1e-4`, or `[1, 2]`. Consider
        # ast.literal_eval if only literals are needed.
        try:
            return eval(arg)
        except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt
            return arg

    # Extra `--key value` pairs override the yaml/config defaults.
    keys = [k[2:] for k in unparsed_args[0::2]]  # remove -- from argument
    values = [process(v) for v in unparsed_args[1::2]]
    unparsed_dict = {k: v for k, v in zip(keys, values)}
    args = vars(args)  # convert to dict
    if args["load_config"] != "":  # load config from existing config file
        with open(args["load_config"], encoding="utf-8") as file:
            all_config = json.load(file)
        args["algo"] = all_config["main_args"]["algo"]
        args["env"] = all_config["main_args"]["env"]
        algo_args = all_config["algo_args"]
        env_args = all_config["env_args"]
    else:  # load config from corresponding yaml file
        algo_args, env_args = get_defaults_yaml_args(args["algo"], args["env"])
    update_args(unparsed_dict, algo_args, env_args)  # update args from command line

    # Deferred import: the runner registry pulls in heavy dependencies.
    from harl.runners import RUNNER_REGISTRY

    model_dir_0 = algo_args['train']['semdiv_eval_model_0']
    model_dir_1 = algo_args['train']['semdiv_eval_model_1']

    # First pass is evaluation only; remember whether the caller asked for
    # rendering so we can re-run with rendering enabled afterwards.
    use_render = algo_args['render']['use_render']
    algo_args['render']['use_render'] = False
    env_args['render'] = False

    runner = RUNNER_REGISTRY[args["algo"]](args, algo_args, env_args)
    runner.restore_one_actor(0, model_dir_0)
    runner.restore_one_actor(1, model_dir_1)

    # Optionally restore a separately-trained act module for one agent.
    if algo_args['train']['semdiv_eval_model_act_id'] != -1:
        runner.restore_one_act(algo_args['train']['semdiv_eval_model_act_id'], algo_args['train']['semdiv_eval_model_act'])

    runner.logger.init(1)
    runner.logger.episode_init(1)
    runner.eval()
    runner.close()

    # Pull the last logged value of each metric from the summary file.
    result_json = os.path.join(runner.run_dir, "logs", "summary.json")
    with open(result_json, 'r', encoding="utf-8") as file:
        data_ = json.load(file)
    data = {k.split('/')[-1]: v[-1][-1] for k, v in data_.items()}
    results = {
        'eval_score_rate': data['eval_score_rate'],
        'eval_average_episode_rewards': data['eval_average_episode_rewards'],
        'eval_average_episode_rewards_additional': data['eval_average_episode_rewards_additional'],
    }

    exp_name = args['exp_name']
    # exist_ok=False is deliberate: refuse to overwrite a previous run.
    os.makedirs(os.path.join(os.getcwd(), f"results/{exp_name}"), exist_ok=False)
    # render and add to `results`
    if use_render:
        algo_args['render']['render_episodes'] = 1
        algo_args['render']['use_render'] = True
        env_args['render'] = True
        env_args['render_dir'] = os.path.join(os.getcwd(), f"results/{exp_name}")

        # Retry with different seeds until an episode scores (or attempts
        # run out); failed replays are deleted except for the last one.
        n_attempt = 5
        for i in range(1, n_attempt + 1):
            algo_args['seed']['seed'] = i
            runner = RUNNER_REGISTRY[args["algo"]](args, algo_args, env_args)
            runner.restore_one_actor(0, model_dir_0)
            runner.restore_one_actor(1, model_dir_1)
            if algo_args['train']['semdiv_eval_model_act_id'] != -1:
                runner.restore_one_act(algo_args['train']['semdiv_eval_model_act_id'], algo_args['train']['semdiv_eval_model_act'])
            runner.render()
            runner.close()

            info = handle_replay(env_args['render_dir'])

            if info['score']:
                break
            elif i < n_attempt:
                shutil.rmtree(env_args['render_dir'])

        # Merge replay summary into the results, refusing key collisions.
        for k, v in info.items():
            assert k not in results, k
            results[k] = v

    res_path = f"results/{exp_name}/res.json"
    with open(res_path, "w", encoding="utf-8") as file:
        json.dump(results, file)
    
# Script entry point: run the evaluation/render pipeline when executed
# directly (not when imported as a module).
if __name__ == "__main__":
    main()
    