#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File    : simulation.simulation.py
# @Note    : 智能体控制Vissim进行仿真交互

import time
import numpy as np
import matplotlib.pyplot as plt
from winreg import OpenKey, QueryValueEx, HKEY_CURRENT_USER
from PyQt5.QtWidgets import QWidget
from torch import cuda, load
from torch.backends import cudnn
from PyQt5.QtCore import pyqtSignal
from environment.vissim import VisCom
import agent.dqn as ag
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Select the computing device: CUDA GPU if available, otherwise CPU
device = 'cuda' if cuda.is_available() else 'cpu'
# print(device)
# Check whether cuDNN is available
CUDNN = cudnn.is_available()
# print(CUDNN)


# Hyper-parameters
BATCH_SIZE = 32  # mini-batch size
LR = 0.01  # learning rate
EPSILON = 0.95  # epsilon-greedy exploration rate
GAMMA = 0.95  # reward discount factor
UPDATE_STEP = 20  # target-network update interval (in learn steps)
MEMORY_CAPACITY = 100  # replay-buffer capacity

N_ACTIONS = 20   # action-space size (overwritten from the env at runtime)
N_STATES = 24    # state-vector length (overwritten from the env at runtime)
ENV_A_SHAPE = 0  # action shape: 0 for scalar int actions
NODE = 100       # hidden-layer node count passed to the agent module

EPSILON_MIN = 0.05    # minimum epsilon-greedy exploration rate
EPISODE = 100         # number of training episodes
MAX_STEP = 1000          # maximum steps per training episode
TEST_STEP = 1000         # number of evaluation steps
DAMPING = (EPSILON_MIN/EPSILON)**(1/EPISODE) # per-episode exploration decay factor
TEST_FREQUENCY = 0.02  # fraction of episodes between network tests
ZEROREWARD = 50       # delay threshold mapped to reward 0 (original note said 54 s, value is 50 — verify)
CONVERGENCE_UP = 2    # reward upper bound counted as "success" (range: -10~10)
CONVERGENCE_LOW = -6  # reward lower bound counted as "failure" (range: -10~10)
CONVERGENCE = 20      # consecutive-step counter that declares convergence

ALGORITHM = "DQN"
LOSS = 0
OPTIM = 0
ACTIVATE = 0

program_file = 'run vissim.bat'

def get_desktop_path():
    """Return the current user's Desktop directory from the Windows registry.

    Uses the key as a context manager so the registry handle is always
    closed (the original leaked the open key).
    """
    with OpenKey(HKEY_CURRENT_USER,
                 r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key:
        return QueryValueEx(key, "Desktop")[0]


class Sim(QWidget):
    """Qt widget that trains and evaluates a DQN agent against a Vissim simulation.

    Progress and log text are pushed to the UI through Qt signals; the UI can
    request an early stop by emitting ``EndInfoEmit(True)``.
    """

    SimInfoEmit = pyqtSignal(str)            # per-step simulation log line
    EvaInfoEmit = pyqtSignal(str)            # evaluation / summary messages
    EndInfoEmit = pyqtSignal(bool)           # stop request from the UI
    RateProgressInfoEmit = pyqtSignal(int)   # training progress, percent

    def __init__(self):
        super().__init__()
        # Raised by end_simulation(); polled by the train/test loops.
        self.stop_flag = False
        self.EndInfoEmit.connect(self.end_simulation)

    @staticmethod
    def agent_revalue():
        """Copy the module-level hyper-parameters into the agent module."""
        ag.device = device
        ag.BATCH_SIZE = BATCH_SIZE
        ag.LR = LR
        ag.EPSILON = EPSILON
        ag.GAMMA = GAMMA
        ag.UPDATE_STEP = UPDATE_STEP
        ag.MEMORY_CAPACITY = MEMORY_CAPACITY
        ag.N_ACTIONS = N_ACTIONS
        ag.N_STATES = N_STATES
        ag.ENV_A_SHAPE = ENV_A_SHAPE
        ag.NODE = NODE

    @staticmethod
    def get_reward(delay):
        """Map a delay value to an integer reward in [-10, 10].

        Equivalent to the original 22-branch if/elif ladder: the reward is the
        largest ``r`` satisfying ``delay < ZEROREWARD - r``, clamped to -10,
        so lower delays earn higher rewards and ``delay`` just below
        ``ZEROREWARD`` maps to 0.
        """
        diff = ZEROREWARD - delay
        for r in range(10, -10, -1):
            if diff > r:
                return r
        return -10

    def end_simulation(self, flag):
        """Slot for EndInfoEmit: raise the stop flag so running loops exit."""
        if flag:
            self.stop_flag = True

    def creat_environment(self, net_path, simulation, plans):
        """Build the Vissim COM environment and sync its space sizes into the
        agent module.

        (Name kept as-is — misspelling of "create" — for caller compatibility.)
        """
        env = VisCom(net_path, program_file, simulation, plans)
        ag.N_STATES = env.observation_space.shape[0]
        ag.N_ACTIONS = env.action_space.n
        sample = env.action_space.sample()
        # Bug fix: the original read ``self.env`` here, an attribute that is
        # never assigned, so non-int action samples raised AttributeError.
        ag.ENV_A_SHAPE = 0 if isinstance(sample, int) else sample.shape
        return env

    @staticmethod
    def _plot_curve(values, ylabel, title, out_file):
        """Plot one metric series over episodes and save it as an SVG."""
        x = np.linspace(0, len(values), len(values))
        plt.plot(x, values)
        # Axis range and ticks (y-axis left to autoscale)
        plt.xlim([0, 100])
        plt.xticks(range(0, 110, 10))
        # Axis labels
        plt.xlabel("Episode", fontproperties="Times New Roman", size=10.5)
        plt.ylabel(ylabel, fontproperties="Times New Roman", size=10.5)
        plt.grid()
        plt.legend([ylabel.lower()], loc="best", frameon=False)
        plt.title(title, fontproperties="Times New Roman", size=10.5)
        plt.savefig(out_file)
        plt.close()

    def darw_record(self, file):
        """Draw the delay and reward training curves from the record file.

        (Name kept as-is — misspelling of "draw" — for caller compatibility.)
        """
        names = ["delay", "reward", "step", "epsilon"]
        # ``error_bad_lines`` was removed in pandas 2.0; ``on_bad_lines="skip"``
        # is the modern equivalent.  The raw string avoids the invalid
        # ``\s`` escape-sequence warning.
        data = pd.read_csv(file, on_bad_lines="skip", sep=r"\s+", names=names)
        delay = list(data["delay"].values)
        reward = list(data["reward"].values)
        print(delay)
        print(reward)
        self._plot_curve(delay, "Delay", "The Delay Curve", "./model/delay.svg")
        self._plot_curve(reward, "Reward", "The Reward Curve", "./model/reward.svg")
        print("done")

        eva_info = "draw record curve done!"
        self.EvaInfoEmit.emit(eva_info)

    @staticmethod
    def normalization_process(observation):
        """Min-max normalize the raw observation into [0, 1] state features.

        Assumed layout: observation[0:8] flows (max 25), [8:16] speeds
        (max 60), [16:24] queue lengths (max 400) — TODO confirm against
        the VisCom observation definition.
        """
        flow = [round(x / 25, 2) for x in observation[0:8]]
        speed = [round(x / 60, 2) for x in observation[8:16]]
        queue = [round(x / 400, 2) for x in observation[16:24]]
        return flow + speed + queue

    def test(self, env, agent, online_net, target_net):
        """Load the saved best networks and run a greedy evaluation episode."""
        # Load the saved network weights
        agent.online_net.load_state_dict(load(online_net, map_location=device))
        agent.target_net.load_state_dict(load(target_net, map_location=device))

        eva_info = "\nStart of Test the Best Train Network Performance\nonline network: {}\ntarget network: {}". \
            format(online_net, target_net)
        self.EvaInfoEmit.emit(eva_info)

        # Initialise evaluation statistics
        test_start = time.perf_counter()
        test_mean_reward = 0
        test_mean_delay = 0

        # Show the Vissim simulation window
        env.render()

        # Reset the environment and read the initial traffic state
        observation = env.reset()
        state = self.normalization_process(observation)

        # Warm-up: one random-action cycle before evaluation starts
        for i in range(1):
            observation, reward, done, info = env.step(env.action_space.sample())
            state = self.normalization_process(observation)

        # Run TEST_STEP simulation cycles
        for step in range(TEST_STEP):
            if self.stop_flag:
                self.stop_flag = False
                env.stop()
                break
            else:
                # Greedy action (no exploration) from the traffic state
                action = agent.action(state, random=False)

                # Run one Vissim cycle and observe the next state
                observation, reward, done, info = env.step(action)
                next_state = self.normalization_process(observation)

                sim_info = "%-4s %-173s" % (str(step), str(info))
                self.SimInfoEmit.emit(sim_info)

                # Redefine the reward from the measured delay
                delay = reward
                redefine_reward = self.get_reward(delay)

                # Update state and running means (halving running average)
                state = next_state
                if step == 0:
                    test_mean_reward = redefine_reward
                    test_mean_delay = delay
                else:
                    test_mean_reward = (test_mean_reward + redefine_reward) / 2
                    test_mean_delay = (test_mean_delay + delay) / 2

                # Append this step to the test record file
                with open('./model/test_record.txt', 'a+') as f:
                    record = "%s\t%s\t%s\t%s\t\n" % \
                             (str(step), str(state), str(delay), str(redefine_reward))
                    f.write(record)

        # Report the best network's mean reward and mean delay
        eva_info = 'test step: {}, test_mean_delay: {}, test_mean_reward: {}'. \
            format(step, round(test_mean_delay, 3), round(test_mean_reward, 3))
        self.EvaInfoEmit.emit(eva_info)

        # Report the evaluation time
        test_time = time.perf_counter() - test_start
        h, ss = divmod(test_time, 3600)
        m, s = divmod(ss, 60)
        eva_info = "complete test time: {} second, that is {} hour, {} minute, {} second".format(test_time, h, m, s)
        self.EvaInfoEmit.emit(eva_info)

    def train(self, env, agent):
        """Run the full DQN training loop, periodically testing and saving
        the best-performing network."""
        # Save the untrained network as a baseline
        agent.save(0)
        # Initialise training statistics
        train_start = time.perf_counter()
        best_reward = -10
        # Training episodes
        for episode in range(EPISODE):
            # Announce the start of this episode (centred banner)
            message = "Start of the {} Episode Train".format(episode)
            sp = int((178 - len(message)) / 2)
            sim_info = "\n" + " " * sp + "%s" % (str(message)) + "\n"
            self.SimInfoEmit.emit(sim_info)

            eva_info = "\nStart of the {} Episode Evaluate".format(episode)
            self.EvaInfoEmit.emit(eva_info)

            rate_progress = int(episode / EPISODE * 100)
            self.RateProgressInfoEmit.emit(rate_progress)

            # Record the banner in the normalised-state log
            with open('./model/normalization_state.txt', 'a+') as f:
                f.write(sim_info)

            # Per-episode statistics
            start = time.perf_counter()
            mean_reward = 0
            mean_delay = 0
            success = 0
            fail = 0
            step_count = 0

            # Vissim rendering stays disabled during training for speed
            # env.render()

            # Reset the environment and read the initial traffic state
            observation = env.reset()
            state = self.normalization_process(observation)

            # Warm-up: one random-action cycle
            for i in range(1):
                observation, reward, done, info = env.step(env.action_space.sample())
                state = self.normalization_process(observation)

            # Log the normalised warm-up state
            with open('./model/normalization_state.txt', 'a+') as f:
                f.write(str(state) + '\n')

            # Run the episode
            for step in range(MAX_STEP):
                if self.stop_flag:
                    self.stop_flag = False
                    break
                else:
                    # Choose a signal-timing action from the traffic state
                    action = agent.action(state)

                    # Run one Vissim cycle and observe the next state
                    observation, reward, done, info = env.step(action)
                    next_state = self.normalization_process(observation)

                    sim_info = "%-4s %-173s" % (str(step), str(info))
                    self.SimInfoEmit.emit(sim_info)

                    # Redefine the reward from the measured delay
                    delay = reward
                    redefine_reward = self.get_reward(delay)

                    # Convergence bookkeeping: count consecutive good/bad steps
                    if redefine_reward >= CONVERGENCE_UP:
                        success += 1
                    else:
                        success = 0
                    if redefine_reward <= CONVERGENCE_LOW:
                        fail += 1
                    else:
                        fail = 0
                    if (success >= CONVERGENCE) or (fail >= CONVERGENCE):
                        done = True

                    # Store the transition in the replay buffer
                    agent.store(state, action, redefine_reward, next_state, done)

                    # Update state, running means, and the step counter
                    state = next_state
                    if step == 0:
                        mean_reward = redefine_reward
                        mean_delay = delay
                    else:
                        mean_reward = (mean_reward + redefine_reward) / 2
                        mean_delay = (mean_delay + delay) / 2
                    step_count += 1

                    # Learn once the replay buffer has filled up
                    if agent.memory_counter > MEMORY_CAPACITY:
                        agent.learn(ALGORITHM)

                    # Log the normalised state
                    with open('./model/normalization_state.txt', 'a+') as f:
                        f.write(str(state) + '\n')

                    # Log the chosen action for this episode
                    with open('./model/train_episode_action.txt', 'a+') as f:
                        f.write(str(action) + ',')

                    # Log the measured delay for this episode
                    with open('./model/train_episode_delay.txt', 'a+') as f:
                        f.write(str(delay) + ',')

                    # Log the success-convergence counter for this episode
                    with open('./model/train_episode_convergence.txt', 'a+') as f:
                        f.write(str(success) + ',')

                    # Episode finished?
                    if done:
                        break

            # Decay the exploration rate towards EPSILON_MIN
            if ag.EPSILON > EPSILON_MIN:
                ag.EPSILON *= DAMPING

            # Emit the per-episode summary
            eva_info = "episode: {}, step: {}, mean_delay: {}, mean_reward: {}, final_epsilon: {}". \
                format(episode, step_count, round(mean_delay, 3), round(mean_reward, 3), ag.EPSILON)
            self.EvaInfoEmit.emit(eva_info)

            # Append the episode summary to the training record file
            with open('./model/train_record.txt', 'a+') as f:
                f.write("%-6s %-6s %-6s %-20s\n" %
                        (str(round(mean_delay, 3)), str(round(mean_reward, 3)), str(step_count), str(ag.EPSILON)))

            # Terminate this episode's line in each per-step log
            with open('./model/train_episode_action.txt', 'a+') as f:
                f.write('\n')

            with open('./model/train_episode_delay.txt', 'a+') as f:
                f.write('\n')

            with open('./model/train_episode_convergence.txt', 'a+') as f:
                f.write('\n')

            with open('./model/loss.txt', 'a+') as f:
                f.write('\n')

            # Estimate the remaining training time
            train_episode_time = time.perf_counter() - start
            remain_time = train_episode_time * (EPISODE - 1 - episode)
            h, ss = divmod(remain_time, 3600)
            m, s = divmod(ss, 60)
            eva_info = "episode {} train time: {} second, remain simulation time: {} hour, {} minute, {} second". \
                format(episode, train_episode_time, h, m, s)
            self.EvaInfoEmit.emit(eva_info)

            # Periodically test the current network
            if episode % max(1, int(EPISODE * TEST_FREQUENCY)) == 0:
                message = "Test of the network ......"
                sp = int((178 - len(message)) / 2)
                sim_info = "\n" + " " * sp + "%s" % (str(message)) + "\n"
                self.SimInfoEmit.emit(sim_info)
                eva_info = 'episode: {}, test the current network performance'.format(episode)
                self.EvaInfoEmit.emit(eva_info)
                # Initialise test statistics
                test_mean_reward = 0
                test_mean_delay = 0

                # Vissim rendering stays disabled here as well
                # env.render()

                # Reset the environment and read the initial traffic state
                observation = env.reset()
                state = self.normalization_process(observation)

                # Warm-up: one random-action cycle
                for i in range(1):
                    observation, reward, done, info = env.step(env.action_space.sample())
                    state = self.normalization_process(observation)

                # Run TEST_STEP simulation cycles greedily
                for step in range(TEST_STEP):
                    if self.stop_flag:
                        self.stop_flag = False
                        env.stop()
                        break
                    else:
                        # Greedy action (no exploration) from the traffic state
                        action = agent.action(state, random=False)

                        # Run one Vissim cycle and observe the next state
                        observation, reward, done, info = env.step(action)
                        next_state = self.normalization_process(observation)

                        sim_info = "%-4s %-173s" % (str(step), str(info))
                        self.SimInfoEmit.emit(sim_info)

                        # Redefine the reward from the measured delay
                        delay = reward
                        redefine_reward = self.get_reward(delay)

                        # Update state and running means
                        state = next_state
                        if step == 0:
                            test_mean_reward = redefine_reward
                            test_mean_delay = delay
                        else:
                            test_mean_reward = (test_mean_reward + redefine_reward) / 2
                            test_mean_delay = (test_mean_delay + delay) / 2

                # Report the test mean delay and mean reward
                eva_info = 'episode: {}, test_mean_delay: {}, test_mean_reward: {}'. \
                    format(episode, round(test_mean_delay, 3), round(test_mean_reward, 3))
                self.EvaInfoEmit.emit(eva_info)

                # Save the best-performing network so far
                if test_mean_reward > best_reward:
                    best_reward = test_mean_reward
                    agent.save(episode)
                    eva_info = '{} episode model has been save...'.format(episode)
                    self.EvaInfoEmit.emit(eva_info)

        # End of training: banner, 100% progress, and total time
        message = "End of the Train"
        sp = int((178 - len(message)) / 2)
        sim_info = "\n" + " " * sp + "%s" % (str(message)) + "\n"
        self.SimInfoEmit.emit(sim_info)
        self.RateProgressInfoEmit.emit(100)
        train_time = time.perf_counter() - train_start
        h, ss = divmod(train_time, 3600)
        m, s = divmod(ss, 60)
        eva_info = "complete train time: {} second, that is {} hour, {} minute, {} second".format(train_time, h, m, s)
        self.EvaInfoEmit.emit(eva_info)

    def run(self, net_path, simulation, plans):
        """Entry point: build the environment, train, test, and plot results."""
        # Create the Vissim simulation environment
        env = self.creat_environment(net_path, simulation, plans)
        # Record the program start time
        program_start = time.perf_counter()
        # Push hyper-parameters into the agent module
        self.agent_revalue()
        # Create the DQN agent
        my_agent = ag.Agent()
        # Train the agent
        self.train(env, my_agent)
        # Test the best trained network
        self.test(env, my_agent, './model/online_network_best.pkl', './model/target_network_best.pkl')
        # Plot the delay and reward curves
        self.darw_record('./model/train_record.txt')
        # Record the program end time and report the total run time
        program_end = time.perf_counter()
        eva_info = "program run time:%d" % (program_end - program_start)
        self.EvaInfoEmit.emit(eva_info)