#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/4/22 19:34
# @Author  : LiShan
# @Email   : lishan_1997@126.com
# @File    : simulation.py
# @Note    : DQN-based traffic-signal control: trains and evaluates an agent in a Vissim simulation, with PyQt5 signals feeding a GUI console

import os
import time
import numpy as np
import matplotlib.pyplot as plt

from collections import deque
from shutil import move

from PyQt5.QtWidgets import QWidget
from torch import load
from PyQt5.QtCore import pyqtSignal

from environment.vissim import VisEnv
import agent.dqn as ag

device = ag.device

# Hyper Parameters
BATCH_SIZE = 32  # batch size
LR = 0.01  # learning rate
EPSILON = 0.9  # greedy policy
GAMMA = 0.9  # reward discount
TARGET_REPLACE_ITER = 100  # target update frequency
MEMORY_CAPACITY = 2000  # memory capacity

ag.BATCH_SIZE = BATCH_SIZE
ag.LR = LR
ag.EPSILON = EPSILON
ag.GAMMA = GAMMA
ag.TARGET_REPLACE_ITER = TARGET_REPLACE_ITER
ag.MEMORY_CAPACITY = MEMORY_CAPACITY

TEST_FREQUENCY = 50
EPISODE = 5000
CONVERGENCE = 20


class Sim(QWidget):
    PrintInfoEmit = pyqtSignal(str)
    TimeInfoEmit = pyqtSignal(str)

    def creat_environment(self, net_path, simulation, timming):
        env = VisEnv(net_path, simulation, timming)
        ag.N_STATES = env.observation_space.shape[0]
        ag.N_ACTIONS = env.action_space.n
        if isinstance(env.action_space.sample(), int):
            ag.ENV_A_SHAPE = 0
        else:
            ag.ENV_A_SHAPE = env.action_space.sample().shape
        return env

    def darw_delay(self, file):
        # 绘制奖励曲线
        reward = []
        with open(file, "r") as f:
            lines = f.readlines()
        for i in range(len(lines)):
            line = lines[i].strip('\n').split()[0]
            reward.append(float(line))
        # 绘图
        x = np.linspace(0, len(reward), len(reward))
        plt.plot(x, reward)
        # 设置坐标轴名称
        plt.xlabel("Step", fontproperties='Times New Roman', size=10.5)
        plt.ylabel("delay", fontproperties='Times New Roman', size=10.5)
        # 设置网格
        plt.grid()
        # 设置图例
        legend = ["delay"]
        plt.legend(legend, loc="best", frameon=False)
        # 设置标题
        plt.title("The Delay Curve", fontproperties='SimSun', size=10.5)
        # 保存图片
        plt.savefig("delay.svg")
        print("draw done!")

    def darw_reward(self, file):
        # 绘制奖励曲线
        reward = []
        with open(file, "r") as f:
            lines = f.readlines()
        for i in range(len(lines)):
            line = lines[i].strip('\n').split()[0]
            reward.append(float(line))
        # 绘图
        x = np.linspace(0, len(reward), len(reward))
        plt.plot(x, reward)
        # 设置坐标轴名称
        plt.xlabel("Step", fontproperties='Times New Roman', size=10.5)
        plt.ylabel("reward", fontproperties='Times New Roman', size=10.5)
        # 设置网格
        plt.grid()
        # 设置图例
        legend = ["reward"]
        plt.legend(legend, loc="best", frameon=False)
        # 设置标题
        plt.title("The Reward Curve", fontproperties='SimSun', size=10.5)
        # 保存图片
        plt.savefig("reward.svg")
        print("draw done!")

    def get_reward(self, delay):
        # 延误40秒对应奖励为0
        refer = 40
        if delay < refer - 10:
            return 10
        elif delay < refer - 8:
            return 8
        elif delay < refer - 6:
            return 6
        elif delay < refer - 4:
            return 4
        elif delay < refer - 2:
            return 2
        elif delay < refer:
            return 0
        elif delay < refer - (-2):
            return -2
        elif delay < refer - (-4):
            return -4
        elif delay < refer - (-6):
            return -6
        elif delay < refer - (-8):
            return -8
        else:
            return -10

    def test(self, env, agent, online_net, target_net):
        # load network model
        agent.online_net.load_state_dict(load(online_net, map_location=device))
        agent.target_net.load_state_dict(load(target_net, map_location=device))

        # initialize parameter
        test_start = time.perf_counter()
        test_total_reward = 0
        test_mean_delay = 0
        test_success = 0
        test_fail = 0
        test_step_count = 0

        # visualization frames
        env.render()

        # reset environment and get initialize state
        observation = env.reset()
        state = observation

        # warm up time
        for i in range(5):
            observation, reward, done, info = env.step(env.action_space.sample())
            state = observation

        # run one episode
        while True:
            # get action
            action = agent.action(state)

            # take action and get next_state
            observation, reward, done, info = env.step(action)
            next_state = observation

            # redefine reward
            delay = reward
            redefine_reward = self.get_reward(delay)

            # judge astringent condition
            if redefine_reward >= 6:
                test_success += 1
            else:
                test_success = 0
            if redefine_reward <= -6:
                test_fail += 1
            else:
                test_fail = 0
            if test_success >= CONVERGENCE or test_fail >= CONVERGENCE:
                done = True

            # update state, reward, mean delay and step count
            state = next_state
            test_total_reward = test_total_reward + redefine_reward
            test_mean_delay = (test_mean_delay + delay) / 2
            test_step_count += 1

            # judgment the end flag
            if done:
                env.close()
                break

        # print the best network's test reward
        print('test_total_reward: {}, step: {}'.format(round(test_total_reward, 3), test_step_count))
        # print the best network's mean delay
        print('test_mean_delay: {}, step: {}'.format(round(test_mean_delay, 3), test_step_count))

        # Print the testing time
        test_time = time.perf_counter() - test_start
        h, ss = divmod(test_time, 3600)
        m, s = divmod(ss, 60)
        print("test time：%d second, that is %d hour, %d minute, %d second" % (test_time, h, m, s))

    def train(self, env, agent):
        # initialize parameter
        train_start = time.perf_counter()
        mean_test = deque(maxlen=1000)
        best_reward = 0
        # train time
        for i_episode in range(EPISODE):
            # start the episode train
            message = "Start of the %d Episode Train" % (i_episode)
            sp = int((178 - len(message)) / 2)
            sim_info = "\n" + " " * sp + "%s" % (str(message)) + "\n"
            self.PrintInfoEmit.emit(sim_info)

            # initialize parameter
            start = time.perf_counter()
            total_reward = 0
            mean_delay = 0
            success = 0
            fail = 0
            step_count = 0

            # visualization frames
            # env.render()

            # reset environment and get initialize state
            observation = env.reset()
            state = observation

            # warm up time
            for i in range(5):
                observation, reward, done, info = env.step(env.action_space.sample())
                state = observation

            # run one episode
            while True:
                # get action
                action = agent.action(state)

                # take action and get next_state
                observation, reward, done, info = env.step(action)
                next_state = observation

                sim_info = "%-4s  %-172s" % (str(step_count), str(info))
                self.PrintInfoEmit.emit(sim_info)

                # redefine reward
                delay = reward
                redefine_reward = self.get_reward(delay)

                # judge astringent condition
                if redefine_reward >= 6:
                    success += 1
                else:
                    success = 0
                if redefine_reward <= -6:
                    fail += 1
                else:
                    fail = 0
                if success >= CONVERGENCE or fail >= CONVERGENCE:
                    done = True

                # store transmission
                agent.store(state, action, redefine_reward, next_state, done)

                # update state, reward, mean delay and step count
                state = next_state
                total_reward = total_reward + redefine_reward
                mean_delay = (mean_delay + delay) / 2
                step_count += 1

                # update network parameter
                if agent.memory_counter > MEMORY_CAPACITY:
                    agent.learn("DQN")

                # save the episode action
                with open('./model/train_episode_action.txt', 'a+') as f:
                    f.write(str(action) + ',')

                # save the episode delay
                with open('./model/train_episode_delay.txt', 'a+') as f:
                    f.write(str(delay) + ',')

                # judgment the end flag
                if done:
                    break

            # print and save the episode total reward
            print('episode: {} , total_reward: {}'.format(i_episode, round(total_reward, 3)))
            with open('./model/train_reward.txt', 'a+') as f:
                f.write(str(total_reward) + '\t' + str(step_count) + '\n')

            # print and save the episode mean delay
            print('episode: {} , mean_delay: {}'.format(i_episode, round(mean_delay, 3)))
            with open('./model/train_delay.txt', 'a+') as f:
                f.write(str(mean_delay) + '\t' + str(step_count) + '\n')

            # save the episode action
            with open('./model/train_episode_action.txt', 'a+') as f:
                f.write('\n')

            # save the episode delay
            with open('./model/train_episode_delay.txt', 'a+') as f:
                f.write('\n')

            # Print the remaining training time
            train_step_time = time.perf_counter() - start
            remain_time = train_step_time * (EPISODE - 1 - i_episode)
            h, ss = divmod(remain_time, 3600)
            m, s = divmod(ss, 60)
            print("train step time：%d second" % train_step_time)
            print("remain time：%d hour, %d minute, %d second" % (h, m, s))
            # info = "预计离训练结束还有%02d时%02d分%02d秒" % (h, m, s)
            # self.TimeInfoEmit.emit(info)

            # test the network performance
            if i_episode > 0 and i_episode % TEST_FREQUENCY == 0:
                # initialize parameter
                test_total_reward = 0
                test_mean_delay = 0
                test_success = 0
                test_fail = 0

                # visualization frames
                # env.render()

                # reset environment and get initialize state
                observation = env.reset()
                state = observation

                # warm up time
                for i in range(5):
                    observation, reward, done, info = env.step(env.action_space.sample())
                    state = observation

                # run one episode
                while True:
                    # get action
                    action = agent.action(state)

                    # take action and get next_state
                    observation, reward, done, info = env.step(action)
                    next_state = observation

                    # redefine reward
                    delay = reward
                    redefine_reward = self.get_reward(delay)

                    # judge astringent condition
                    if redefine_reward >= 6:
                        test_success += 1
                    else:
                        test_success = 0
                    if redefine_reward <= -6:
                        test_fail += 1
                    else:
                        test_fail = 0
                    if test_success >= CONVERGENCE or test_fail >= CONVERGENCE:
                        done = True

                    # update state, test all reward and test mean delay
                    state = next_state
                    test_total_reward = test_total_reward + redefine_reward
                    test_mean_delay = (test_mean_delay + delay) / 2

                    # judgment the end flag
                    if done:
                        break

                # print the episode and test total reward
                print('episode: {} , test_total_reward: {}'.format(i_episode, round(test_total_reward, 3)))
                mean_test.append(test_total_reward)

                # print the episode and test mean delay
                print('episode: {} , test_mean_delay: {}'.format(i_episode, round(test_mean_delay, 3)))

                # save the history best network model
                if np.mean(mean_test) > best_reward:
                    best_reward = np.mean(mean_test)
                    agent.save(i_episode)

        # endthe episode train
        message = "End of the Train"
        sp = int((178 - len(message)) / 2)
        sim_info = "\n" + " " * sp + "%s" % (str(message)) + "\n"
        self.PrintInfoEmit.emit(sim_info)

        # Print the training time
        train_time = time.perf_counter() - train_start
        h, ss = divmod(train_time, 3600)
        m, s = divmod(ss, 60)
        print("train time：%d second, that is %d hour, %d minute, %d second" % (train_time, h, m, s))

    def backup_model(self, source, target):
        # move all model files to backup dictionary
        for root, dirs, files in os.walk(source):
            if len(files) > 0:
                # get system time
                now = time.strftime("%Y-%m-%d-%H_%M", time.localtime(time.time()))
                # create backup dictionary
                path = target + now + '/'
                if os.path.exists(path):
                    pass
                else:
                    os.makedirs(path)
                # move files
                for file in files:
                    move(os.path.join(root, file), path)

    def run(self, net_path, simulation, timming):
        print("训练平台为: ", device)
        path_list = ['./model', './backup', './result', './other']
        for path in path_list:
            if os.path.exists(path):
                pass
            else:
                os.mkdir(path)
        # create simulation environment
        env = self.creat_environment(net_path, simulation, timming)
        # record the program start time
        program_start = time.perf_counter()
        # backup the already exist model
        self.backup_model('./model/', './backup/')
        # define agent
        my_agent = ag.Agent()
        # train agent
        self.train(env, my_agent)
        # test the best network performance
        self.test(env, my_agent, './model/online_network_best.pkl', './model/target_network_best.pkl')
        # record the program end time
        program_end = time.perf_counter()
        # print the program run time
        print("program run time:%d" % (program_end - program_start))
        # draw the reward curve
        self.darw_reward('./model/train_reward.txt')
        # draw the dalay curve
        self.darw_delay('./model/train_delay.txt')
