import os
import colorama
from tools.simulation_result_plotter import draw_lineChart
from Entity.env import Scenario
from Entity.ComputingUnit import *
import time
import json
from colorama import Fore, Style
from simulation_constants import *
import xml.etree.ElementTree as ET
from Entity.offloadingStrategy.SAC import *
from Entity.offloadingStrategy.DDPG import *
from Entity.offloadingStrategy.PPO import *


def get_simulation_result(file_name) -> list:
    """ Load a previously saved simulation result.

    args:
        file_name (str): result file name (without the .json extension),
            resolved under ./simulation_results/
    return:
        loaded_data (list): the deserialized simulation result list
    """
    # Explicit UTF-8: the default text encoding is platform-dependent
    # (e.g. GBK on Chinese Windows), which can corrupt non-ASCII JSON.
    with open(f'./simulation_results/{file_name}.json', 'r', encoding='utf-8') as f:
        loaded_data = json.load(f)
    return loaded_data


def set_simulation_result(simulation_results, file) -> None:
    """ Persist simulation results to a JSON file.

    args:
        simulation_results: JSON-serializable result data (typically a list)
        file (str): destination file path
    """
    # Explicit UTF-8 keeps the output readable and portable across platforms.
    with open(file, 'w', encoding='utf-8') as f:
        json.dump(simulation_results, f)


def convert_seconds(seconds):
    """ Format a duration in seconds as "X小时Y分钟Z秒", omitting zero components.

    args:
        seconds (int | float): duration in seconds
    return:
        (str): human-readable duration string; "0秒" for a zero/sub-second
            duration (the original returned an empty string in that case)
    """
    parts = []
    hours = seconds // 3600  # whole hours
    if hours != 0:
        parts.append(f"{int(hours)}小时")
    minutes = (seconds % 3600) // 60  # whole minutes within the hour
    if minutes != 0:
        parts.append(f"{int(minutes)}分钟")
    remaining_seconds = seconds % 60  # leftover seconds
    if remaining_seconds != 0:
        parts.append(f"{int(remaining_seconds)}秒")
    # Bug fix: without this fallback a zero duration rendered as "".
    return "".join(parts) if parts else "0秒"


def get_24_hour_time():
    """ Return the current local time formatted as "HH:MM" (24-hour clock). """
    return time.strftime("%H:%M", time.localtime(time.time()))


def xml_2_scenario_2_policydict(scenarios_dir_path,
                            scenarios_count=10,
                            offloading_strategy_decision=-1) -> tuple:
    """ Pre-scan a randomly chosen dynamic traffic scenario and initialize a
    decision network for every vehicle that appears in it.

    args:
        scenarios_dir_path (str): directory containing the scenario xml files
            named "0.xml" .. f"{scenarios_count - 1}.xml"
        scenarios_count (int): number of candidate scenario files to pick from
        offloading_strategy_decision (int): offloading strategy index

    return:
        root (Element): root node of the chosen traffic-scenario xml
        obus_policy_dict (dict): decision network per vehicle,
            key = vehicle ID, value = decision network (may be None for
            non-learning strategies)
    """
    # Pick one scenario file uniformly at random to vary the traffic layout.
    scenario_file = f"{scenarios_dir_path}/{random.randint(0, scenarios_count - 1)}.xml"
    root = ET.ElementTree(file=scenario_file).getroot()

    # One decision network per distinct vehicle ID across all time steps.
    obus_policy_dict = dict()
    for step in root:
        for vehicle in step:
            vehicle_id = vehicle.attrib["id"]
            if vehicle_id not in obus_policy_dict:
                obus_policy_dict[vehicle_id] = generate_agent_based_on_offloading_strategy(offloading_strategy_decision)

    return root, obus_policy_dict


def generate_agent_based_on_offloading_strategy(offloading_strategy_decision):
    """ Build the decision network that matches an offloading strategy index.

    args:
        offloading_strategy_decision (int): offloading strategy index
            0-2: non-learning strategies (no network needed)
            3, 5: DDPG; 4, 6: PPO; 7, 8: SAC

    return:
        agent (Agent | None): the corresponding decision network, or None
            for non-learning strategies

    raises:
        ValueError: for an unrecognized strategy index
    """
    if offloading_strategy_decision in {0, 1, 2}:
        # Non-learning strategies carry no decision network.
        return None
    elif offloading_strategy_decision in {4, 6}:
        return PPOAgent(state_dim=STATE_DIM,
                        action_dim=ACTION_DIM,
                        has_continuous_action_space=False)
    elif offloading_strategy_decision in {3, 5}:
        return DDPGAgent(state_dim=STATE_DIM,
                        action_dim=ACTION_DIM)
    elif offloading_strategy_decision in {7, 8}:
        return SACAgent(state_dim=STATE_DIM,
                        action_dim=ACTION_DIM)
    else:
        # ValueError (subclass of Exception, so callers catching Exception
        # still work) and include the offending value for easier debugging.
        raise ValueError(f'错误的初始化  决策网络分支: {offloading_strategy_decision}')

 
def one_simulation(offloading_strategy_decision=0,
                  num_episode=500,
                  scenario_map="",
                  scenario_gen_para=100,
                  message_gen_poss_lambda=0.15,
                  datetime="日期设置故障",
                  non_learning_pth_url=""):
    """ One complete training run for a single offloading strategy.

    args:
        offloading_strategy_decision (int): offloading strategy index
        num_episode (int): number of simulation episodes in this run
        scenario_map (str): simulation scenario map name
        scenario_gen_para (float): scenario generation parameter
        message_gen_poss_lambda (float): rate parameter of the Poisson process
            by which task vehicles generate computing tasks
        datetime (str): run start-time string, used to name the result directory
        non_learning_pth_url (str): when non-empty, trained decision weights are
            loaded from this url and applied without learning; when empty ("")
            full DRL training is performed

    return:
        max_reward (float) | None: maximum episode reward — returned only for
            centralized strategies, otherwise None (implicit)
    """

    start_time = time.time()
    colorama.init()

    # Compose the full dynamic-simulation name from map + generation parameters
    if scenario_map == 'highway' or scenario_map == 'grid_network':
        scenario_name = f"scenario_{scenario_map}_{scenario_map2paraName[scenario_map][0]}={scenario_gen_para}_lam={message_gen_poss_lambda}"
    elif scenario_map == 'NGSIM_I-80' or scenario_map == 'NGSIM_US-101':
        scenario_name = f"scenario_{scenario_map}_lam={message_gen_poss_lambda}"
    else:
        raise Exception('未设置的交通场景')
    xml_loc = f"../scenario_xml_generate/scenario_xml/{scenario_name}"  # simulation scenario configuration directory

    # Create local directories for simulation results (trained weights and metric lists)
    result_url = f"./simulation_results/{datetime}_{scenario_name}"
    os.makedirs(f"{result_url}/policy/federated_DDPG", exist_ok=True)
    os.makedirs(f"{result_url}/policy/federated_PPO", exist_ok=True)
    os.makedirs(f"{result_url}/result", exist_ok=True)

    # Aggregate per-episode statistics for the whole run
    total_reward_list = []
    success_ontime_task_rates_list = []
    total_task_totaltime_list = []
    total_task_energy_list = []

    # Tracks the best episode reward so far; used to snapshot the network weights
    max_reward = 0

    # Initialize the RSUs of the chosen scenario
    if scenario_map == "highway":
        # Example: 3 RSUs on a 1200 m highway, 400 m apart, each covering the
        # surrounding 200 m, with 4G Cycles/s compute capacity
        rsu1 = RSU(id="rsu1", x=200, y=0)
        rsu2 = RSU(id="rsu2", x=600, y=0)
        rsu3 = RSU(id="rsu3", x=1000, y=0)
        scenario = Scenario({rsu1.id: rsu1, rsu2.id: rsu2, rsu3.id: rsu3})
    elif scenario_map == "gridNetwork":
        # NOTE(review): this branch tests "gridNetwork" while the scenario_name
        # branch above tests 'grid_network' — likely a naming mismatch; confirm
        # which spelling the callers actually pass.
        rsu1 = RSU(id="rsu1", x=200, y=200)
        rsu2 = RSU(id="rsu2", x=200, y=400)
        rsu3 = RSU(id="rsu3", x=400, y=200)
        rsu4 = RSU(id="rsu4", x=400, y=400)
        scenario = Scenario({rsu1.id: rsu1, rsu2.id: rsu2, rsu3.id: rsu3, rsu4.id: rsu4})
    elif scenario_map == 'NGSIM_I-80':
        rsu1 = RSU(id="rsu1", x=0, y=200)
        rsu2 = RSU(id="rsu2", x=0, y=600)
        rsu3 = RSU(id="rsu3", x=0, y=1000)
        rsu4 = RSU(id="rsu4", x=0, y=1400)
        rsu5 = RSU(id="rsu5", x=0, y=1800)
        scenario = Scenario({rsu1.id: rsu1, rsu2.id: rsu2, rsu3.id: rsu3, rsu4.id: rsu4, rsu5.id: rsu5})
    elif scenario_map == 'NGSIM_US-101':
        rsu1 = RSU(id="rsu1", x=35, y=200)
        rsu2 = RSU(id="rsu2", x=35, y=600)
        rsu3 = RSU(id="rsu3", x=35, y=1000)
        rsu4 = RSU(id="rsu4", x=35, y=1400)
        rsu5 = RSU(id="rsu5", x=35, y=1800)
        rsu6 = RSU(id="rsu6", x=35, y=2200)
        scenario = Scenario({rsu1.id: rsu1, rsu2.id: rsu2, rsu3.id: rsu3, rsu4.id: rsu4, rsu5.id: rsu5, rsu6.id: rsu6})
    else:
        # NOTE(review): falling through leaves `scenario` unbound and the loop
        # below raises NameError; unreachable for known maps because the
        # scenario_name branch above already raised — but 'grid_network' would
        # land here given the mismatch noted above.
        pass
    

    # Initialize each RSU's decision network; in inference-only mode also load
    # the pre-trained weight parameters
    for rsu in scenario.rsus.values():
        rsu.os_policy = generate_agent_based_on_offloading_strategy(offloading_strategy_decision)
        if non_learning_pth_url != "":
            rsu.os_policy.actor.load_state_dict(torch.load(f"{non_learning_pth_url}/policy/{index2name[offloading_strategy_decision]}/{rsu.id}.pth"))

    # Distributed strategies need a decision network pre-built for every vehicle
    # that will ever appear in the scenario
    if offloading_strategy_decision in is_distributed_strategy:
        traffic_scenario_root, obus_policy_dict = xml_2_scenario_2_policydict(
                                                         scenarios_dir_path=xml_loc,
                                                         offloading_strategy_decision=offloading_strategy_decision)

    # Start training
    for episode in range(num_episode):
        # Per-episode statistics
        total_reward = 0                        # total reward this episode
        task_count = 0                          # total task count
        success_ontime_task_count = 0           # tasks completed on time
        success_process_task_count = 0          # successfully processed tasks, i.e. excluding failed offloads; includes on-time and timed-out tasks
        sum_success_process_task_totaltime = 0  # total processing time of successfully processed tasks (for the average)
        sum_success_process_total_energy = 0    # total energy of successfully processed tasks (for the average)

        task_offloading_count = 0                   # tasks offloaded to another CU
        success_ontime_task_offloading_count = 0    # offloaded tasks completed on time
        # Newer failure categorization / timing stats removed (only original stats kept)

        # Generate a different scenario with the same traffic demand
        if offloading_strategy_decision not in is_distributed_strategy:
            traffic_scenario_root, obus_policy_dict = xml_2_scenario_2_policydict(
                                                             scenarios_dir_path=xml_loc,
                                                             offloading_strategy_decision=offloading_strategy_decision)
        scenario.reset_scenario(traffic_scenario_root, obus_policy_dict)

        # Each iteration advances time by 0.1 s (next SUMO frame), equivalent to
        # env.step(); starts after 100 s of simulated traffic-flow stabilization
        for step_i in range(len(traffic_scenario_root)):

            task_queue = scenario.step(step_i)

            # Process, one by one, the tasks the vehicles generated in this slot
            for this_task in task_queue:
                
                # Make the task-offloading decision
                reward, total_time, is_timeout, is_offloading, total_energy = scenario.vehicles[
                    this_task.task_vehicle_id].make_task_offloading_decision(
                    task=this_task,
                    scenario=scenario,
                    offloading_strategy_decision=offloading_strategy_decision,
                    non_learning=True if non_learning_pth_url != "" else False)

                # success_offload_task_count += 1 if reward != 0 else 0
                task_count += 1
                success_ontime_task_count += 1 if is_timeout != True else 0
                task_offloading_count += 1 if is_offloading else 0
                success_ontime_task_offloading_count += 1 if is_timeout != True and is_offloading else 0
                success_process_task_count += 1 if total_time != -1 else 0
                sum_success_process_task_totaltime += total_time if total_time != -1 else 0
                sum_success_process_total_energy += total_energy if total_time != -1 else 0
                # Accumulate this task's reward into the episode return,
                # clamped to the [0, +inf) range
                # total_reward += max(0, reward - 100 * total_energy)
                total_reward += max(0, reward)
                # End of per-task processing for this slot

        # For centralized DRL strategies, snapshot the decision-network
        # parameters whenever a new best episode reward is reached
        if (offloading_strategy_decision in is_centralized_strategy) and total_reward > max_reward:
            max_reward = total_reward
            for rsu in scenario.rsus.values():
                if offloading_strategy_decision == 5:
                    torch.save(rsu.os_policy.actor.state_dict(), f"{result_url}/policy/federated_DDPG/{rsu.id}.pth")
                else:
                    torch.save(rsu.os_policy.actor.state_dict(), f"{result_url}/policy/federated_PPO/{rsu.id}.pth")

        # Record this episode's statistics
        total_reward_list.append(total_reward)
        total_task_totaltime_list.append((sum_success_process_task_totaltime / success_process_task_count) if success_process_task_count != 0 else 0)
        success_ontime_task_rates_list.append(round(success_ontime_task_count / task_count * 100, 3))
        total_task_energy_list.append((sum_success_process_total_energy / success_process_task_count) if success_process_task_count != 0 else 0)
        

        # offloading_strategy_decision += 2 if non_learning_pth_url != "" else 0

        # Progress line: carriage-return updates in place until the final episode
        if episode + 1 != num_episode:
            print(Fore.RED + f"\r{index2name[offloading_strategy_decision]} 卸载已完成 {1 + episode}/{num_episode}, 已耗时 {convert_seconds(time.time()-start_time)}",
                  end='' + Style.RESET_ALL)
        else:
            print(Fore.RED + f"\r{index2name[offloading_strategy_decision]} 卸载已完成 {1 + episode}/{num_episode}, 已耗时 {convert_seconds(time.time() - start_time)}" + Style.RESET_ALL)
        if task_offloading_count != 0:
            print(
                f"  任务成功率: {success_ontime_task_count / task_count * 100:.3f} %" + 
                f"  任务卸载率: {task_offloading_count / task_count * 100:.3f} %" + 
                f"  成功卸载的任务数量: {success_ontime_task_offloading_count}" +
                f"  卸载任务成功率: {success_ontime_task_offloading_count / task_offloading_count * 100:.3f} %" +
                f"  成功处理任务平均处理时长: {sum_success_process_task_totaltime / success_process_task_count:.3f} 毫秒"
                )
        else:
            print(f"  任务成功率: {success_ontime_task_count / task_count * 100:.3f} %")


        # print(f"成功卸载且准时完成的任务卸载率: {success_ontime_task_offloading_count / task_offloading_count * 100:.3f} %")
        # draw_lineChart(total_reward_list, success_ontime_task_rates_list)
        # print(f"第{episode + 1:d}轮模拟总时间为 {time.time() - start_time:.3f} 秒, 回报为{total_reward_list[-1]}, 任务成功卸载概率为: {success_ontime_task_rates_list[-1]} %")
        # print(f"总共处理了 {task_count:d} 个任务")
        # print(f"任务成功卸载概率为: {success_offload_task_count / task_count * 100:.3f} %")
        # print(f"成功卸载的任务平均处理时长为: {sum_task_totaltime / success_offload_task_count:.3f} 秒")
        # print(f"成功卸载且准时完成的任务概率为: {success_ontime_task_count / task_count * 100:.3f} %")
        # print(f"场景中平均存在的车辆数为 {sum_vehs / 1000:.3f}")

    # Persist the four per-episode metric lists for later plotting
    # print(f"{index2name[offloading_strategy_decision]} 卸载total_reward_list: {total_reward_list}")
    set_simulation_result(total_reward_list, f"{result_url}/result/{index2name[offloading_strategy_decision]}_return.json")
    set_simulation_result(success_ontime_task_rates_list, f"{result_url}/result/{index2name[offloading_strategy_decision]}_successRate.json")
    set_simulation_result(total_task_totaltime_list, f"{result_url}/result/{index2name[offloading_strategy_decision]}_taskTotaltime.json")
    set_simulation_result(total_task_energy_list, f"{result_url}/result/{index2name[offloading_strategy_decision]}_taskEnergy.json")

    # Marks the end of this strategy's training in the current run (sole print)
    # print(Fore.RED + f"{index2name[offloading_strategy_decision]}策略{NUM_EPISODE}轮卸载训练结束, 耗时{convert_seconds(time.time()-start_time)}" + Style.RESET_ALL)

    if offloading_strategy_decision in is_centralized_strategy:
        return max_reward


def main():
    """ Entry point: run one_simulation for every configured strategy, then
    plot the combined results. Relies on star-imported configuration constants
    (stratege_choices, NUM_EPISODE, scenario_map, scenario_gen_para,
    message_gen_poss_lambda). """

    # genScenarios(scenario_map=scenario_map, 
    #              scenario_gen_para=scenario_gen_para, 
    #              message_gen_poss_lambda=message_gen_poss_lambda, 
    #              num_scenarios=10)

    start_time = time.time()    # record the simulation start time
    datetime = f"{time.localtime().tm_year}.{time.localtime().tm_mon}.{time.localtime().tm_mday}.{time.localtime().tm_hour}.{time.localtime().tm_min}"  # freeze the start time as this run's unique identifier
    
    ######################################   begin simulation   ########################################
    for strategy in stratege_choices:
        max_return1 = one_simulation(offloading_strategy_decision=strategy,
                                    num_episode=NUM_EPISODE,
                                    scenario_map=scenario_map,
                                    scenario_gen_para=scenario_gen_para,
                                    message_gen_poss_lambda=message_gen_poss_lambda,
                                    datetime=datetime)
    ######################################   end simulation   ########################################
    
    # Compose the full dynamic-simulation name (must match one_simulation's result_url)
    if scenario_map == 'NGSIM_I-80' or scenario_map == 'NGSIM_US-101':
        data_url = f"./simulation_results/{datetime}_scenario_{scenario_map}_lam={message_gen_poss_lambda}/result"
    elif scenario_map == 'highway' or scenario_map == 'gridNetwork':
        # NOTE(review): 'gridNetwork' here vs 'grid_network' in one_simulation's
        # scenario_name branch — the composed path will not match for grid maps;
        # confirm the intended spelling.
        data_url = f"./simulation_results/{datetime}_scenario_{scenario_map}_{scenario_map2paraName[scenario_map][0]}={scenario_gen_para}_lam={message_gen_poss_lambda}/result"
    else:
        raise Exception('未设置的绘图分支')
    draw_lineChart(data_url=data_url, save_fig=True)
    
    print(Fore.RED + f"本次训练总耗时：{convert_seconds(time.time() - start_time)}" + Style.RESET_ALL)
    
    # """
    #     Build and send a notification SMS
    # """
    # if (sendMessage):
    #     d = dict()
    #     d["number"] = 1
    #     d["end_time"] = get_24_hour_time()
    #     d["total_time"] = convert_seconds(time.time() - start_time)
    #     d["strategy_name1"] = "联邦DDPG"
    #     d["return1"] = f"{max_return1:.2e}"
    #     # d["strategy_name2"] = "联邦PPO"
    #     # d["return2"] = f"{max_return2:.2e}"
    #     SMS.sendSMS(d)


# Script entry point
if __name__ == '__main__':
    # Optionally wrap main() with PyCallGraph to chart per-module call times
    # with PyCallGraph(output=GraphvizOutput()):
    #     main()
    main()