from MAPPO import MAPPO
from common.utils import agg_double_list, copy_file_new, init_dir
import sys
sys.path.append("../highway-env")

import gym
import numpy as np
import matplotlib.pyplot as plt
import highway_env
import argparse
import configparser
import os
import time

# from datetime import datetime




def parse_args():
    """Build and parse command-line arguments for this experiment.

    Description for this experiment:
        + easy: globalR
        + seed = 0

    Returns:
        argparse.Namespace with base_dir, option, config_dir, model_dir
        and evaluation_seeds attributes.
    """
    parser = argparse.ArgumentParser(
        description=('Train or evaluate policy on RL environment '
                     'using mappo'))
    parser.add_argument('--base-dir', type=str, required=False,
                        default="./results/videos/new/",
                        help="experiment base dir")
    parser.add_argument('--option', type=str, required=False,
                        default='train', help="train or evaluate")
    parser.add_argument('--config-dir', type=str, required=False,
                        default='configs/configs_ppo.ini',
                        help="experiment config path")
    parser.add_argument('--model-dir', type=str, required=False,
                        default='', help="pretrained model path")
    # Default seeds: 0, 20, 40, ..., 580 (30 evaluation episodes).
    parser.add_argument('--evaluation-seeds', type=str, required=False,
                        default=','.join(map(str, range(0, 600, 20))),
                        help="random seeds for evaluation, split by ,")
    return parser.parse_args()


def _apply_env_config(env, config, seed_offset=0):
    """Copy ENV_CONFIG / MODEL_CONFIG settings from `config` onto `env.config`.

    Shared by the training and evaluation environments so the two stay in
    sync (the original duplicated these 13 assignments).

    Args:
        env: highway-env environment exposing a mutable `config` dict.
        config: loaded `configparser.ConfigParser` instance.
        seed_offset: added to the configured seed; the evaluation env uses
            seed + 1 so it does not replay the training episodes.
    """
    env.config['seed'] = config.getint('ENV_CONFIG', 'seed') + seed_offset
    for key in ('simulation_frequency', 'duration', 'policy_frequency',
                'COLLISION_REWARD', 'HIGH_SPEED_REWARD', 'HEADWAY_COST',
                'MERGING_LANE_COST', 'traffic_density', 'n_step'):
        env.config[key] = config.getint('ENV_CONFIG', key)
    env.config['HEADWAY_TIME'] = config.getfloat('ENV_CONFIG', 'HEADWAY_TIME')
    env.config['action_masking'] = config.getboolean('MODEL_CONFIG', 'action_masking')
    env.config['safety_guarantee'] = config.getboolean('ENV_CONFIG', 'safety_guarantee')


def train(args):
    """Train MAPPO on the multi-agent merge environment.

    Reads model/train/env hyper-parameters from the INI file at
    `args.config_dir`, builds a training env plus a separately-seeded
    evaluation env, runs the interact/train/evaluate loop, and saves model
    checkpoints, reward arrays, TensorBoard scalars and a reward curve.

    Args:
        args: namespace from `parse_args()` (base_dir, config_dir,
            model_dir, evaluation_seeds).
    """
    base_dir = args.base_dir
    config = configparser.ConfigParser()
    config.read(args.config_dir)

    # Create the experiment folder.
    # Fix: the original called os.path.join(base_dir + "medium-0-12"),
    # concatenating instead of joining — broken when base_dir has no
    # trailing slash. NOTE(review): the run name is hard-coded; a
    # timestamp (time.strftime) would give unique per-run dirs.
    output_dir = os.path.join(base_dir, "medium-0-12")
    dirs = init_dir(output_dir)
    copy_file_new(dirs['configs'])

    # Resume from a pretrained model dir when one was supplied and exists.
    if os.path.exists(args.model_dir):
        model_dir = args.model_dir
    else:
        model_dir = dirs['models']

    # model configs
    BATCH_SIZE = config.getint('MODEL_CONFIG', 'BATCH_SIZE')
    MEMORY_CAPACITY = config.getint('MODEL_CONFIG', 'MEMORY_CAPACITY')
    ROLL_OUT_N_STEPS = config.getint('MODEL_CONFIG', 'ROLL_OUT_N_STEPS')
    reward_gamma = config.getfloat('MODEL_CONFIG', 'reward_gamma')
    actor_hidden_size = config.getint('MODEL_CONFIG', 'actor_hidden_size')
    critic_hidden_size = config.getint('MODEL_CONFIG', 'critic_hidden_size')
    MAX_GRAD_NORM = config.getfloat('MODEL_CONFIG', 'MAX_GRAD_NORM')
    ENTROPY_REG = config.getfloat('MODEL_CONFIG', 'ENTROPY_REG')
    reward_type = config.get('MODEL_CONFIG', 'reward_type')
    TARGET_UPDATE_STEPS = config.getint('MODEL_CONFIG', 'TARGET_UPDATE_STEPS')
    TARGET_TAU = config.getfloat('MODEL_CONFIG', 'TARGET_TAU')

    # train configs
    actor_lr = config.getfloat('TRAIN_CONFIG', 'actor_lr')
    critic_lr = config.getfloat('TRAIN_CONFIG', 'critic_lr')
    MAX_EPISODES = config.getint('TRAIN_CONFIG', 'MAX_EPISODES')
    EPISODES_BEFORE_TRAIN = config.getint('TRAIN_CONFIG', 'EPISODES_BEFORE_TRAIN')
    EVAL_INTERVAL = config.getint('TRAIN_CONFIG', 'EVAL_INTERVAL')
    EVAL_EPISODES = config.getint('TRAIN_CONFIG', 'EVAL_EPISODES')
    reward_scale = config.getfloat('TRAIN_CONFIG', 'reward_scale')

    # init training env (merge scenario)
    env = gym.make('merge-multi-agent-v0')
    _apply_env_config(env, config)
    traffic_density = config.getint('ENV_CONFIG', 'traffic_density')

    # Rollout length must tile the episode horizon exactly.
    assert env.T % ROLL_OUT_N_STEPS == 0

    # Evaluation env: identical settings but seeded with seed + 1 so
    # evaluation episodes differ from training ones.
    env_eval = gym.make('merge-multi-agent-v0')
    _apply_env_config(env_eval, config, seed_offset=1)

    state_dim = env.n_s
    action_dim = env.n_a
    test_seeds = args.evaluation_seeds

    mappo = MAPPO(env=env, memory_capacity=MEMORY_CAPACITY,
                  state_dim=state_dim, action_dim=action_dim,
                  batch_size=BATCH_SIZE, entropy_reg=ENTROPY_REG,
                  roll_out_n_steps=ROLL_OUT_N_STEPS,
                  actor_hidden_size=actor_hidden_size, critic_hidden_size=critic_hidden_size,
                  actor_lr=actor_lr, critic_lr=critic_lr, reward_scale=reward_scale,
                  target_update_steps=TARGET_UPDATE_STEPS, target_tau=TARGET_TAU,
                  reward_gamma=reward_gamma, reward_type=reward_type,
                  max_grad_norm=MAX_GRAD_NORM, test_seeds=test_seeds,
                  episodes_before_train=EPISODES_BEFORE_TRAIN, traffic_density=traffic_density
                  )

    # Load a pretrained model if one exists at model_dir.
    mappo.load(model_dir, train_mode=True)
    env.seed = env.config['seed']
    env.unwrapped.seed = env.config['seed']
    eval_rewards = []
    # Fix: the original assigned best_eval_reward = -100 twice.
    best_eval_reward = -100

    # TensorBoard writer. Imported lazily so evaluation-only runs do not
    # need tensorboard. NOTE(review): the log root is a hard-coded
    # absolute path — TODO make it configurable / derive from base_dir.
    from torch.utils.tensorboard import SummaryWriter
    log_dir = os.path.join("/home/tianxj/code/MARL_CAVs/MARL/results",
                           "Logs", "new_training_logs", "medium-0-12")
    writer = SummaryWriter(log_dir=log_dir, comment="new_training_logs")

    while mappo.n_episodes < MAX_EPISODES:
        mappo.interact()

        if mappo.n_episodes >= EPISODES_BEFORE_TRAIN:
            mappo.train()

        # Periodic evaluation at the end of every EVAL_INTERVAL-th episode.
        if mappo.episode_done and ((mappo.n_episodes + 1) % EVAL_INTERVAL == 0):
            rewards, _, _, _ = mappo.evaluation(env_eval, dirs['train_videos'], EVAL_EPISODES)
            rewards_mu, rewards_std = agg_double_list(rewards)
            print(f"\nEpisode {mappo.n_episodes + 1}")
            print(f"Average Reward: {rewards_mu:.2f} ± {rewards_std:.2f}")
            eval_rewards.append(rewards_mu)

            writer.add_scalar("Average Reward", rewards_mu, mappo.n_episodes + 1)
            writer.flush()

            # Checkpointing: global step 100000 always holds the best model
            # so far; every evaluation also checkpoints under its own
            # episode index (the original duplicated the per-episode save
            # in both branches).
            if rewards_mu > best_eval_reward:
                mappo.save(dirs['models'], 100000)
                best_eval_reward = rewards_mu
            mappo.save(dirs['models'], mappo.n_episodes + 1)

            np.save(os.path.join(output_dir, 'episode_rewards'), np.array(mappo.episode_rewards))
            np.save(os.path.join(output_dir, 'eval_rewards'), np.array(eval_rewards))
            np.save(os.path.join(output_dir, 'average_speed'), np.array(mappo.average_speed))

    # Final checkpoint after the training budget is exhausted.
    mappo.save(dirs['models'], MAX_EPISODES + 2)
    writer.close()

    # Plot the evaluation-reward curve.
    # NOTE(review): hard-coded output path — TODO save under output_dir.
    plt.figure()
    plt.plot(eval_rewards)
    plt.xlabel("Episode")
    plt.ylabel("Average Reward")
    plt.legend(["MAPPO"])
    plt.savefig("/home/tianxj/code/MARL_CAVs/docs/reward_curve.png")


def evaluate(args):
    """Evaluate a pretrained MAPPO policy on the merge environment.

    Loads the model and its training config from `args.model_dir`, rolls
    out one episode per seed in `args.evaluation_seeds`, prints aggregate
    reward / collision-rate / speed statistics, and saves the raw arrays
    under <model_dir>/eval_logs.

    Args:
        args: namespace from `parse_args()`; `model_dir` must exist.

    Raises:
        Exception: if `args.model_dir` does not exist.
    """
    if os.path.exists(args.model_dir):
        model_dir = args.model_dir + '/models/'
    else:
        raise Exception("Sorry, no pretrained models")
    config_dir = args.model_dir + '/configs/configs_ppo.ini'
    config = configparser.ConfigParser()
    config.read(config_dir)

    video_dir = args.model_dir + '/eval_videos'
    eval_logs = args.model_dir + '/eval_logs'
    # Fix: np.save below fails when the logs dir is missing (nothing else
    # creates it on a fresh model dir).
    os.makedirs(eval_logs, exist_ok=True)

    # model configs
    BATCH_SIZE = config.getint('MODEL_CONFIG', 'BATCH_SIZE')
    MEMORY_CAPACITY = config.getint('MODEL_CONFIG', 'MEMORY_CAPACITY')
    ROLL_OUT_N_STEPS = config.getint('MODEL_CONFIG', 'ROLL_OUT_N_STEPS')
    reward_gamma = config.getfloat('MODEL_CONFIG', 'reward_gamma')
    actor_hidden_size = config.getint('MODEL_CONFIG', 'actor_hidden_size')
    critic_hidden_size = config.getint('MODEL_CONFIG', 'critic_hidden_size')
    MAX_GRAD_NORM = config.getfloat('MODEL_CONFIG', 'MAX_GRAD_NORM')
    ENTROPY_REG = config.getfloat('MODEL_CONFIG', 'ENTROPY_REG')
    reward_type = config.get('MODEL_CONFIG', 'reward_type')
    TARGET_UPDATE_STEPS = config.getint('MODEL_CONFIG', 'TARGET_UPDATE_STEPS')
    TARGET_TAU = config.getfloat('MODEL_CONFIG', 'TARGET_TAU')

    # train configs (MAPPO's constructor requires them even in eval mode)
    actor_lr = config.getfloat('TRAIN_CONFIG', 'actor_lr')
    critic_lr = config.getfloat('TRAIN_CONFIG', 'critic_lr')
    EPISODES_BEFORE_TRAIN = config.getint('TRAIN_CONFIG', 'EPISODES_BEFORE_TRAIN')
    reward_scale = config.getfloat('TRAIN_CONFIG', 'reward_scale')

    # init env with the same settings the model was trained under
    env = gym.make('merge-multi-agent-v0')
    env.config['seed'] = config.getint('ENV_CONFIG', 'seed')
    env.config['simulation_frequency'] = config.getint('ENV_CONFIG', 'simulation_frequency')
    env.config['duration'] = config.getint('ENV_CONFIG', 'duration')
    env.config['policy_frequency'] = config.getint('ENV_CONFIG', 'policy_frequency')
    env.config['COLLISION_REWARD'] = config.getint('ENV_CONFIG', 'COLLISION_REWARD')
    env.config['HIGH_SPEED_REWARD'] = config.getint('ENV_CONFIG', 'HIGH_SPEED_REWARD')
    env.config['HEADWAY_COST'] = config.getint('ENV_CONFIG', 'HEADWAY_COST')
    env.config['HEADWAY_TIME'] = config.getfloat('ENV_CONFIG', 'HEADWAY_TIME')
    env.config['MERGING_LANE_COST'] = config.getint('ENV_CONFIG', 'MERGING_LANE_COST')
    env.config['traffic_density'] = config.getint('ENV_CONFIG', 'traffic_density')
    traffic_density = config.getint('ENV_CONFIG', 'traffic_density')
    env.config['action_masking'] = config.getboolean('MODEL_CONFIG', 'action_masking')

    # Rollout length must tile the episode horizon exactly.
    assert env.T % ROLL_OUT_N_STEPS == 0
    state_dim = env.n_s
    action_dim = env.n_a
    test_seeds = args.evaluation_seeds
    seeds = [int(s) for s in test_seeds.split(',')]

    mappo = MAPPO(env=env, memory_capacity=MEMORY_CAPACITY,
                  state_dim=state_dim, action_dim=action_dim,
                  batch_size=BATCH_SIZE, entropy_reg=ENTROPY_REG,
                  roll_out_n_steps=ROLL_OUT_N_STEPS,
                  actor_hidden_size=actor_hidden_size, critic_hidden_size=critic_hidden_size,
                  actor_lr=actor_lr, critic_lr=critic_lr, reward_scale=reward_scale,
                  target_update_steps=TARGET_UPDATE_STEPS, target_tau=TARGET_TAU,
                  reward_gamma=reward_gamma, reward_type=reward_type,
                  max_grad_norm=MAX_GRAD_NORM, test_seeds=test_seeds,
                  episodes_before_train=EPISODES_BEFORE_TRAIN, traffic_density=traffic_density
                  )

    # Load the best checkpoint: train() stores the best-so-far model under
    # the fixed global step 100000.
    mappo.load(model_dir, global_step=100000, train_mode=False)
    rewards, (vehicle_speed, vehicle_position), steps, avg_speeds = mappo.evaluation(env, video_dir, len(seeds),
                                                                                     is_train=False)
    rewards_mu, rewards_std = agg_double_list(rewards)
    # An episode that runs the full 100 steps is counted a success
    # (no early termination). NOTE(review): 100 presumably equals
    # duration * policy_frequency — confirm against the config.
    success_rate = sum(np.array(steps) == 100) / len(steps)
    avg_speeds_mu, avg_speeds_std = agg_double_list(avg_speeds)

    print("Evaluation Reward and std %.2f, %.2f " % (rewards_mu, rewards_std))
    print("Collision Rate %.2f" % (1 - success_rate))
    print("Average Speed and std %.2f , %.2f " % (avg_speeds_mu, avg_speeds_std))

    # dtype=object where episodes have ragged lengths.
    np.save(os.path.join(eval_logs, 'eval_rewards'), np.array(rewards, dtype=object))
    np.save(os.path.join(eval_logs, 'eval_steps'), np.array(steps))
    np.save(os.path.join(eval_logs, 'eval_avg_speeds'), np.array(avg_speeds))
    np.save(os.path.join(eval_logs, 'vehicle_speed'), np.array(vehicle_speed))
    np.save(os.path.join(eval_logs, 'vehicle_position'), np.array(vehicle_position, dtype=object))


if __name__ == "__main__":
    # Parse CLI flags, then dispatch: --option train (default) or evaluate.
    cli_args = parse_args()
    run = train if cli_args.option == 'train' else evaluate
    run(cli_args)
