#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/7/24 10:01
# @Author  : LiShan
# @Email   : lishan_1997@126.com
# @File    : no_interface_no_accelerate.py
# @Note    : this is note

import os
import string
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from torch import cuda, load
from torch.backends import cudnn

import agent.dqn as ag
from environment.vissim import VisCom

# Torch device for all tensors/networks — fixed to "cpu" in this
# "no accelerate" variant (the `cuda` import above is left unused here).
device = 'cpu'
# Whether cuDNN is available (recorded but not otherwise used in this file).
CUDNN = cudnn.is_available()

EPSILON_MAX = 0.99  # epsilon-greedy: maximum exploration probability
EPSILON_MIN = 0.05  # epsilon-greedy: minimum exploration probability
LR_MAX = 0.01  # maximum learning rate
LR_MIN = 1e-5  # minimum learning rate
EPISODE = 21  # number of training episodes
MAX_STEP = 42  # maximum steps (signal cycles) per episode
TEST_STEP = MAX_STEP  # steps per evaluation run
TEST_FREQUENCY = 10 / EPISODE  # evaluation frequency (train() evaluates every max(1, int(EPISODE*TEST_FREQUENCY)) episodes)
ZEROREWARD = 38.509  # delay (s) that maps to reward 0 (original note said 39.337 s — verify which is current)
CONVERGENCE_UP = 4  # reward upper threshold counted toward convergence (range: -10~10)
CONVERGENCE_LOW = -4  # reward lower threshold counted toward divergence (range: -10~10)
CONVERGENCE = int(MAX_STEP * 0.20) + 1  # consecutive-step count that triggers convergence/early stop

# Hyper-parameters
LR = LR_MAX  # learning rate (starts at the maximum)
EPSILON = EPSILON_MAX  # epsilon-greedy exploration probability (starts at the maximum)
GAMMA = 0.95  # reward discount factor
BATCH_SIZE = 32  # minibatch size
UPDATE_STEP = 50  # target-network update interval (steps)
MEMORY_CAPACITY = MAX_STEP * 1  # replay-memory capacity (one episode's worth of steps)
EPSILON_DAMPING = (EPSILON_MIN / EPSILON) ** (1 / EPISODE)  # exponential epsilon decay factor (only used by the commented-out option in train())
LR_DAMPING = (LR_MIN / LR) ** (1 / EPISODE)  # learning-rate decay factor (not referenced in this file — verify)

N_ACTIONS = 20  # action-space size (overwritten from the plan list in creat_environment)
N_STATES = 24  # state-vector length (overwritten from env.reset() in creat_environment)
ENV_A_SHAPE = 0  # action shape: 0 for a scalar discrete action
NODE = 100  # network width parameter passed to agent.dqn — TODO confirm its meaning there

ALGORITHM = "DQN"
LOSS = "SmoothL1Loss"
OPTIM = "Adam"
ACTIVATE = "relu"


plan_file = "./fix/txt/test_fix_plans_define.txt"  # predefined fixed plans (see commented read_plans call in __main__)
train_file = './model/txt/train_record.txt'  # per-episode training record
train_test_file = './model/txt/test_record.txt'  # periodic evaluation record
drl_test_file = './model/txt/test_drl_record.txt'  # final greedy-policy test record
status_file = './model/txt/test_status_record.txt'  # per-step state record written during testing
online_network = './model/pkl/online_network_best.pkl'  # best online-network weights
target_network = './model/pkl/target_network_best.pkl'  # best target-network weights

# Where generated timing plans are saved
plans_file = "./model/txt/test_fix_plans.txt"
# Project root (parent of this file's directory)
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Vissim network file path
net_path = project_path.replace("\\", "/") + "/resource/vissim/net/net.inp"
# Vissim simulation settings — per the original note: max episode simulation
# time, speed, resolution, control frequency, graphics mode, 3D mode
# (6 items listed for 7 values; confirm each position against VisCom)
simulation = [999999, 0, 5, 1, 42, True, False]
# Signal timing settings: cycle length, min green, max green, green step,
# amber time, all-red time per phase
timming = [170, 41, 57, 2, 3, [2, 2, 2]]


# Configure the Windows CPU power plan (defaults to high-performance mode).
def power_config(index=2):
    """Switch the active Windows power scheme via ``powercfg``.

    Args:
        index: Position in the ``mode`` table below —
            0 power saver, 1 balanced, 2 high performance, 3 ultimate.

    Raises:
        IndexError: if ``index`` is outside the mode table.
    """
    import subprocess
    # GUIDs of the built-in Windows power schemes.
    mode = [
        "a1841308-3541-4fab-bc81-f71556f20b4a",  # power saver
        "381b4222-f694-41f0-9685-ff5bb260df2e",  # balanced
        "8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c",  # high performance
        "e9a42b02-d5df-448d-aa00-03f14749eb61",  # ultimate performance
    ]
    # Pass the command as an argument list (shell=False): no string
    # interpolation into a command line, and consistent behaviour across
    # platforms (a plain string is treated as the program name on POSIX).
    subprocess.call(["powercfg", "-s", mode[index]])


# Ensure the project output directories exist.
def check_path():
    """Create every directory the run writes into (idempotent)."""
    path_list = ['./model', './model/pkl', './model/png', './model/txt', './backup', './result', './other']
    for path in path_list:
        # makedirs(exist_ok=True) replaces the check-then-create pattern,
        # which was racy and needed the parent to already exist.
        os.makedirs(path, exist_ok=True)


# Generate candidate signal-timing plans and persist them to *file*.
def create_plans(para, file, plan_num=-1):
    """Enumerate green-split permutations that exactly fill the cycle.

    Args:
        para: [cycle_time, green_low, green_high, green_interval,
               amber_time, clearing_time_per_phase_list].
        file: output text file; rewritten from scratch, one plan per line.
        plan_num: maximum number of plans to keep (< 0 means unlimited).

    Returns:
        (plans, unused_plan): kept plans (each
        [cycle, amber, clearing_times, green_splits]) and the count of
        valid combinations discarded because of the plan_num cap.
    """
    from itertools import permutations
    cycle_time, green_low, green_high, green_interval, amber_time, clearing_time = para[:6]
    phase_num = len(clearing_time)
    # Lost time per cycle: one amber per phase plus every all-red interval.
    loss_time = amber_time * phase_num + sum(clearing_time)
    target_green = cycle_time - loss_time
    plans = []
    unused_plan = 0
    # Open once in "w": truncates previous content and avoids reopening the
    # file for every plan; the old try/except around truncation silently
    # swallowed all errors, which is dropped deliberately.
    with open(file, "w") as f:
        for greens in permutations(range(green_low, green_high + 1, green_interval), phase_num):
            if sum(greens) != target_green:
                continue
            if plan_num < 0 or len(plans) < plan_num:
                plan = [cycle_time, amber_time, clearing_time, list(greens)]
                plans.append(plan)
                f.write("%s\n" % str(plan))
            else:
                # Valid split, but the cap is already reached.
                unused_plan += 1
    return plans, unused_plan


# Read timing plans back from a file produced by create_plans.
def read_plans(file):
    """Parse one plan per non-empty line and return them as a list.

    Uses ast.literal_eval instead of eval: the file only ever contains
    Python list literals, and literal_eval cannot execute arbitrary code.
    Whitespace-only lines are skipped (eval("  ") used to raise).
    """
    from ast import literal_eval
    plans = []
    with open(file, "r") as f:
        for line in f:
            line = line.strip()
            if line:
                plans.append(literal_eval(line))
    return plans

# Obtain a ready-to-use Vissim simulation environment.
def get_vissim_env(net_path, simulation, plans):
    """Launch Vissim and wrap it in a VisCom environment.

    Returns the environment, or None (after printing a message) when the
    Vissim executable could not be started.
    """
    exe_path = run_vissim()
    if exe_path is None:
        # Vissim was not found / failed to launch.
        print("初始化环境失败")
        return None
    return VisCom(exe_path, net_path, simulation, plans)


# List the drive letters that exist on this machine.
def get_disklist():
    """Return every existing drive root, e.g. ['C:', 'D:']."""
    return [letter + ':'
            for letter in string.ascii_uppercase
            if os.path.isdir(letter + ':')]


# Recursively search *path* for the Vissim executable.
def search_file(path):
    """Return the absolute path of the first file whose path ends in
    "vissim.exe" under *path*, or None when nothing matches."""
    for root, _dirs, names in os.walk(path):
        for name in names:
            candidate = os.path.abspath(os.path.join(root, name))
            if candidate.endswith("vissim.exe"):
                return candidate
    return None


# Launch the Vissim simulator executable.
def run_vissim():
    """Search every drive for a Vissim installation and start it.

    The executable is launched through RunAsDate (expected in the project
    root) with a fixed date — presumably to keep a time-limited Vissim
    licence valid; confirm before changing.

    Returns:
        The path of vissim.exe, or None when no installation was found.
    """
    # path = r"E:\Program Files (x86)\PTV_Vision\VISSIM430\Exe\vissim.exe"
    start = time.perf_counter()
    project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    path = None
    for disk in get_disklist():
        # Look for vissim.exe under the default PTV install folder.
        path = search_file(disk + r"\Program Files (x86)\PTV_Vision")
        if path is not None:
            # NOTE(review): os.system blocks until the command returns and
            # builds the command by string interpolation — paths containing
            # quotes would break it.
            command = r'%s\RunAsDate.exe 01\01\2008 00:04:10 "%s"' % (project_path, path)
            os.system(command)
            break
        else:
            pass
    else:
        # for/else: loop finished without break, i.e. no drive had Vissim.
        print("未找到vissim软件")
    end = time.perf_counter()
    print("Vissim仿真启动时间：%.2f" % (end - start))
    return path


# Push this module's hyper-parameters into the agent module.
def agent_revalue():
    """Overwrite agent.dqn's module-level settings with the values
    configured in this script, so the agent trains with them."""
    overrides = {
        'device': device,
        'BATCH_SIZE': BATCH_SIZE,
        'LR': LR,
        'LR_MIN': LR_MIN,
        'EPSILON': EPSILON,
        'GAMMA': GAMMA,
        'UPDATE_STEP': UPDATE_STEP,
        'MEMORY_CAPACITY': MEMORY_CAPACITY,
        'N_ACTIONS': N_ACTIONS,
        'N_STATES': N_STATES,
        'ENV_A_SHAPE': ENV_A_SHAPE,
        'NODE': NODE,
    }
    for attr, value in overrides.items():
        setattr(ag, attr, value)


# Reward function 1: discretized delay-based reward in [-10, 10].
def get_reward_1(delay, ZEROREWARD):
    """Map a delay value to an integer reward.

    Scanning r from 10 downward, the reward is the first r (10 >= r >= -9)
    for which ``delay < ZEROREWARD - r`` holds, and -10 otherwise — i.e.
    the smaller the delay relative to ZEROREWARD, the larger the reward,
    clamped to [-10, 10].

    Replaces the original 21-branch if/elif chain with an equivalent loop;
    the comparisons run in the same order with the same float arithmetic,
    so results are identical.
    """
    for reward in range(10, -10, -1):
        if delay < ZEROREWARD - reward:
            return reward
    return -10


# Reward function 2: continuous delay-based reward.
def get_reward_2(delay, ZEROREWARD):
    """Return the delay improvement over the zero-reward baseline,
    rounded to three decimals (positive when delay < ZEROREWARD)."""
    improvement = ZEROREWARD - delay
    return round(improvement, 3)


# Persist a snapshot of the run configuration to the training log file.
def save_log(env, net_path, simulation, plans):
    """Write the timestamp, simulation setup, plan list and every
    hyper-parameter to ./model/txt/tarin_log.txt.

    The *env* argument is accepted for interface compatibility but not
    used. The output filename's apparent "tarin" typo is kept as-is so
    existing tooling that reads it keeps working.
    """
    lines = [time.strftime("time:%Y-%m-%d-%H:%M:%S\n", time.localtime(time.time()))]
    lines.append("net_path:{}\n".format(net_path))
    lines.append("simulation:{}\n".format(simulation))
    lines.append("plans:\n")
    for plan in plans:
        lines.append(str(plan))
        lines.append("\n")
    # Name/value pairs in the exact order of the original log layout.
    settings = [
        ("device", device), ("LR", LR), ("LR_MIN", LR_MIN), ("LR_MAX", LR_MAX),
        ("EPSILON", EPSILON), ("EPSILON_MIN", EPSILON_MIN), ("EPSILON_MAX", EPSILON_MAX),
        ("GAMMA", GAMMA), ("EPISODE", EPISODE), ("MEMORY_CAPACITY", MEMORY_CAPACITY),
        ("BATCH_SIZE", BATCH_SIZE), ("UPDATE_STEP", UPDATE_STEP),
        ("TEST_FREQUENCY", TEST_FREQUENCY), ("CONVERGENCE_UP", CONVERGENCE_UP),
        ("CONVERGENCE_LOW", CONVERGENCE_LOW), ("ZEROREWARD", ZEROREWARD),
        ("CONVERGENCE", CONVERGENCE), ("ALGORITHM", ALGORITHM), ("LOSS", LOSS),
        ("OPTIM", OPTIM), ("ACTIVATE", ACTIVATE), ("N_ACTIONS", N_ACTIONS),
        ("N_STATES", N_STATES), ("ENV_A_SHAPE", ENV_A_SHAPE), ("NODE", NODE),
    ]
    for name, value in settings:
        lines.append("{}:{}\n".format(name, value))
    with open('./model/txt/tarin_log.txt', 'w') as f:
        f.write("".join(lines))


# Create the simulation environment.
def creat_environment(net_path, simulation, plans):
    """Build the Vissim gym-style environment and size the agent's
    state/action spaces from it.

    Side effects: rebinds the module globals N_STATES / N_ACTIONS and the
    corresponding attributes of the agent module ``ag``.
    """
    from gym.spaces import Box, Discrete
    env = get_vissim_env(net_path, simulation, plans)
    # Action space: one discrete action per timing plan.
    env.action_space = Discrete(len(plans))
    # State space: sized from the state vector returned by a reset.
    global N_ACTIONS, N_STATES
    N_STATES = len(env.reset())
    N_ACTIONS = env.action_space.n
    low = np.array([0 for _ in range(N_STATES)], dtype=np.float32)
    # NOTE(review): 1000 is assumed to be a safe upper bound for every
    # state component — confirm against VisCom's observations.
    high = np.array([1000 for _ in range(N_STATES)], dtype=np.float32)
    env.observation_space = Box(low, high, dtype=np.float32)
    ag.N_STATES = env.observation_space.shape[0]
    ag.N_ACTIONS = N_ACTIONS
    # Discrete samples are plain ints (shape 0); otherwise keep the shape.
    if isinstance(env.action_space.sample(), int):
        ag.ENV_A_SHAPE = 0
    else:
        ag.ENV_A_SHAPE = env.action_space.sample().shape
    return env


# Plot the training record files (public name "darw..." kept as-is).
def darw_train_record(file1, file2):
    """Render the per-episode training metrics in *file1* and the
    periodic evaluation metrics in *file2* as PNG curves under
    ./model/png/.

    Args:
        file1: whitespace-separated training record with columns
               episode step epsilon learn_rate convergence delay reward loss.
        file2: whitespace-separated evaluation record with columns
               episode step delay reward.
    """
    font = {'family': 'SimSun',
            'weight': 'bold',
            'size': '16'}
    plt.rc('font', **font)
    plt.rc('axes', unicode_minus=False)
    # Training curves.  Raw string for the separator: "\s" is an invalid
    # escape sequence in a plain string literal.
    names = ["episode", "step", "epsilon", "learn_rate", "convergence", "delay", "reward", "loss"]
    data = pd.read_csv(file1, sep=r"\s+", names=names)
    _plot_series(list(data["delay"].values), "Delay", "delay",
                 "./model/png/train_delay.png")
    _plot_series(list(data["reward"].values), "Reward", "reward",
                 "./model/png/train_reward.png")
    # Drop the leading zeros recorded before learning started so the loss
    # curve begins at the first real update (an all-zero series is kept).
    loss = list(data["loss"].values)
    idx = next((i for i, v in enumerate(loss) if v != 0), 0)
    _plot_series(loss[idx:], "Loss", "loss", "./model/png/train_loss.png")
    _plot_series(list(data["convergence"].values), "Convergence", "convergence",
                 "./model/png/train_convergence.png")
    _plot_series(list(data["epsilon"].values), "Epsilon", "epsilon",
                 "./model/png/train_epsilon.png")
    _plot_series(list(data["learn_rate"].values), "Learn_Rate", "learn_rate",
                 "./model/png/train_learn_rate.png")

    # Evaluation curves.
    names = ["episode", "step", "delay", "reward"]
    data = pd.read_csv(file2, sep=r"\s+", names=names)
    _plot_series(list(data["delay"].values), "Delay", "delay",
                 "./model/png/train_test_delay.png")
    _plot_series(list(data["reward"].values), "Reward", "reward",
                 "./model/png/train_test_reward.png")


def _plot_series(series, ylabel, label, out_path):
    """Plot one metric as a black line over its index and save it as PNG.

    Helper for darw_train_record; replicates the figure style of the
    original inline plotting code (Times New Roman labels, 600 dpi,
    figure closed after saving).
    """
    x = np.linspace(0, len(series), len(series))
    plt.plot(x, series, "k")
    plt.xlabel("Episode", fontproperties="Times New Roman", size=10.5)
    plt.ylabel(ylabel, fontproperties="Times New Roman", size=10.5)
    plt.legend([label], loc="best", frameon=False)
    plt.savefig(out_path, dpi=600)
    plt.close()


# Plot the DRL test record file (public name "darw..." kept as-is).
def darw_test_record(file3):
    """Render the per-step delays of a trained-policy test run, plus a
    dashed mean-delay reference line, to ./model/png/drl_test_delay.png.

    Args:
        file3: whitespace-separated test record with columns
               step plan delay reward.
    """
    font = {'family': 'SimSun',
            'weight': 'bold',
            'size': '16'}
    plt.rc('font', **font)
    plt.rc('axes', unicode_minus=False)
    names = ["step", "plan", "delay", "reward"]
    # Raw string for the separator: "\s" is an invalid escape sequence in
    # a plain string literal (DeprecationWarning since Python 3.6).
    data = pd.read_csv(file3, sep=r"\s+", names=names)
    delay = list(data["delay"].values)
    x = np.linspace(0, len(delay), len(delay))
    mean_delay = np.mean(delay)
    plt.plot(x, delay, color='black', marker='D', linestyle='-', linewidth='1.0')
    # Horizontal reference line at the mean delay.
    plt.plot(x, [mean_delay] * len(x), color='gray', linestyle='--')
    plt.xlabel("Step", fontproperties="Times New Roman", size=10.5)
    plt.ylabel("Delay", fontproperties="Times New Roman", size=10.5)
    plt.legend(["drl delay", "mean delay line"], loc="best", frameon=False)
    plt.savefig("./model/png/drl_test_delay.png", dpi=600)
    plt.close()


# Evaluate the agent with the best saved networks.
def test(env, agent, online_net, target_net):
    """Run the greedy (no-exploration) policy for TEST_STEP signal cycles
    and log the visited states and per-step delay/reward to files.

    Args:
        env: VisCom simulation environment.
        agent: agent exposing online_net / target_net and action().
        online_net: path of the saved online-network state dict (.pkl).
        target_net: path of the saved target-network state dict (.pkl).
    """
    # Load the best saved network weights onto the configured device.
    agent.online_net.load_state_dict(load(online_net, map_location=device))
    agent.target_net.load_state_dict(load(target_net, map_location=device))
    # Initialise bookkeeping.
    test_start = time.perf_counter()
    test_delay_record = []
    test_rewrad_record = []
    # Reset the environment to get the initial traffic state.
    state = env.reset()
    # env.render()
    # Warm-up cycles (fixed action 0) before measurement starts.
    for i in range(5):
        state, reward, done, info = env.step(0)
    # Run the configured number of simulation cycles.
    for step in range(TEST_STEP):
        # Greedy action (random=False) from the current traffic state.
        action = agent.action(state, random=False)
        # Run one cycle in Vissim; the raw reward is the measured delay.
        next_state, reward, done, info = env.step(action)
        # Redefine the reward from the raw delay.
        delay = reward
        redefine_reward = get_reward_2(delay, ZEROREWARD)
        # Update the state and the per-step records.
        state = next_state
        test_delay_record.append(delay)
        test_rewrad_record.append(redefine_reward)
        # Append the state to the status record file.
        with open(status_file, 'a+') as f:
            record = "%s\t\n" % (state)
            f.write(record)
        # Append step / action / delay / reward to the test record file.
        with open(drl_test_file, 'a+') as f:
            record = "%-5s\t%-5s\t%-5s\t%-5s\t\n" % (str(step + 1), str(action + 1), str(delay), str(redefine_reward))
            f.write(record)
    # Report the mean delay and mean reward of the best network.
    test_mean_delay = sum(test_delay_record) / len(test_delay_record)
    test_mean_reward = sum(test_rewrad_record) / len(test_rewrad_record)
    eva_info = 'test step: {}, test_mean_delay: {}, test_mean_reward: {}'. \
        format(TEST_STEP, round(test_mean_delay, 3), round(test_mean_reward, 3))
    print(eva_info)
    # Report the wall-clock test duration.
    test_time = time.perf_counter() - test_start
    h, ss = divmod(test_time, 3600)
    m, s = divmod(ss, 60)
    eva_info = "complete test time: {} second, that is {} hour, {} minute, {} second".format(test_time, h, m, s)
    print(eva_info)


# Train the agent.
def train(env, agent):
    """Train *agent* in *env* for EPISODE episodes.

    Each episode: warm up with fixed actions, run up to MAX_STEP signal
    cycles with epsilon-greedy actions, store transitions and learn once
    the replay memory is full, decay epsilon with a cosine schedule, and
    periodically evaluate the greedy policy — saving the networks whenever
    the evaluation reward improves.
    """
    # Save the freshly initialised networks.
    agent.save('./model/pkl/')
    best_reward = -10
    # Start training.
    for episode in range(EPISODE):
        # Per-episode setup (episodes are reported 1-based).
        episode += 1
        start = time.perf_counter()
        delay_record = []
        rewrad_record = []
        loss_record = []
        loss = 0
        success = 0
        max_sucess = 0
        fail = 0
        step_count = 0
        convergence_test = False
        state = env.reset()
        # env.render()
        # Warm-up cycles (fixed action 0) before learning starts.
        for i in range(5):
            state, reward, done, info = env.step(0)
        # Run the current episode.
        for step in range(MAX_STEP):
            # Epsilon-greedy action from the current traffic state.
            action = agent.action(state)
            # Run one cycle in Vissim; the raw reward is the measured delay.
            next_state, reward, done, info = env.step(action)
            # Redefine the reward from the raw delay.
            delay = reward
            redefine_reward = get_reward_2(delay, ZEROREWARD)
            # Convergence check: count consecutive high-reward steps.
            if redefine_reward >= CONVERGENCE_UP:
                success += 1
                if success > max_sucess:
                    max_sucess = success
            else:
                success = 0
            if success >= CONVERGENCE:
                convergence_test = True
                done = True
            # Divergence check: count consecutive low-reward steps.
            if redefine_reward <= CONVERGENCE_LOW:
                fail += 1
            else:
                fail = 0
            if fail >= CONVERGENCE:
                done = True
            # Store the transition in the replay memory.
            agent.store(state, action, redefine_reward, next_state, done)
            # Learn once the replay memory has been filled.
            if agent.memory_counter > MEMORY_CAPACITY:
                loss = agent.learn(ALGORITHM)
            # Update the state and the per-episode statistics.
            state = next_state
            loss_record.append(loss)
            delay_record.append(delay)
            rewrad_record.append(redefine_reward)
            step_count += 1
            # Stop the episode when the done flag is set.
            if done:
                break
        # Print episode number, step count, epsilon, learning rate, best
        # convergence streak, mean delay, mean reward and mean loss.
        mean_delay = sum(delay_record) / len(delay_record)
        mean_reward = sum(rewrad_record) / len(rewrad_record)
        mean_loss = sum(loss_record) / len(loss_record)
        eva_info = "episode: {}, step: {}, epsilon: {}, lr: {}, convergence: {}, delay: {}, reward: {}, loss: {}". \
            format(episode, step_count, ag.EPSILON, ag.LR, max_sucess,
                   round(mean_delay, 3),
                   round(mean_reward, 3),
                   round(mean_loss, 3))
        print(eva_info)
        # Append the episode record to the training record file.
        with open(train_file, 'a+') as f:
            record = "%-5s\t%-5s\t%-5s\t%-5s\t%-5s\t%-5s\t%-5s\t%-5s\t\n" % \
                     (str(episode), str(step_count), str(round(ag.EPSILON, 3)), str(round(ag.LR, 3)), str(max_sucess),
                      str(round(mean_delay, 3)),
                      str(round(mean_reward, 3)),
                      str(round(mean_loss, 3)))
            f.write(record)
        # Gradually decay the exploration rate.
        if ag.EPSILON > EPSILON_MIN:
            # Option 1: exponential decay (disabled)
            # ag.EPSILON *= EPSILON_DAMPING
            # Option 2: cosine decay (in use)
            x = episode / EPISODE * np.pi
            y = EPSILON_MIN + (np.cos(x) + 1) / 2 * (EPSILON_MAX - EPSILON_MIN)
            ag.EPSILON = y
        # Estimate and print the remaining training time.
        train_episode_time = time.perf_counter() - start
        remain_time = train_episode_time * int((EPISODE - episode) * (1 + TEST_FREQUENCY))
        h, ss = divmod(remain_time, 3600)
        m, s = divmod(ss, 60)
        eva_info = "episode {} train time: {} second, remain simulation time: {} hour, {} minute, {} second". \
            format(episode, train_episode_time, h, m, s)
        print(eva_info)
        with open("no_interface_no_accelerate.txt", 'a+') as f:
            record = "%.3f\n" % (train_episode_time)
            f.write(record)
        # Evaluate the network when any trigger fires: convergence was
        # reached (c1), the periodic checkpoint is due (c2), or it is the
        # last (c3) or the first (c4) episode.
        c1 = convergence_test
        c2 = (episode % max(1, int(EPISODE * TEST_FREQUENCY)) == 0)
        c3 = (episode == EPISODE)
        c4 = (episode == 1)
        if c1 or c2 or c3 or c4:
            # Initialise the evaluation records.
            test_delay_record = []
            test_rewrad_record = []
            # Reset the environment to get the initial traffic state.
            state = env.reset()
            # env.render()
            # Warm-up cycles (fixed action 0).
            for i in range(5):
                state, reward, done, info = env.step(0)
            # Run the configured number of evaluation cycles.
            for step in range(TEST_STEP):
                # Greedy action (random=False) from the traffic state.
                action = agent.action(state, random=False)
                # Run one cycle in Vissim; the raw reward is the delay.
                next_state, reward, done, info = env.step(action)
                # Redefine the reward from the raw delay.
                delay = reward
                redefine_reward = get_reward_2(delay, ZEROREWARD)
                test_delay_record.append(delay)
                test_rewrad_record.append(redefine_reward)
                # Update the state.
                state = next_state
            # Print the episode, mean evaluation delay and mean reward.
            test_mean_delay = sum(test_delay_record) / len(test_delay_record)
            test_mean_reward = sum(test_rewrad_record) / len(test_rewrad_record)
            eva_info = 'episode: {}, test_mean_delay: {}, test_mean_reward: {}'. \
                format(episode, round(test_mean_delay, 3), round(test_mean_reward, 3))
            print(eva_info)
            # Append the evaluation record to its record file.
            with open(train_test_file, 'a+') as f:
                record = "%-5s\t%-5s\t%-5s\t%-5s\t\n" % \
                         (str(episode), str(TEST_STEP),
                          str(round(test_mean_delay, 3)),
                          str(round(test_mean_reward, 3)))
                f.write(record)
            # Save the networks whenever the evaluation reward improves.
            if test_mean_reward > best_reward:
                best_reward = test_mean_reward
                agent.save('./model/pkl/', episode)
            # Redraw the delay/reward curves.
            darw_train_record(train_file, train_test_file)


# Run the full simulation pipeline.
def run(net_path, simulation, plans):
    """Create the environment, train an agent, test the best networks and
    plot every record file; prints the total wall-clock run time."""
    # Create the Vissim training environment.
    env = creat_environment(net_path, simulation, plans)
    # Save the training configuration log.
    save_log(env, net_path, simulation, plans)
    # Record the program start time.
    program_start = time.perf_counter()
    # Push the hyper-parameters into the agent module.
    agent_revalue()
    # Create the agent.
    my_agent = ag.Agent()
    # Train the agent.
    train(env, my_agent)
    # Test the performance of the best trained networks.
    test(env, my_agent, online_network, target_network)
    # Plot the delay/reward curves.
    darw_train_record(train_file, train_test_file)
    darw_test_record(drl_test_file)
    # Record the program end time.
    program_end = time.perf_counter()
    # Report the total run time (seconds, truncated by %d).
    eva_info = "program run time:%d" % (program_end - program_start)
    print(eva_info)


if __name__ == '__main__':
    # Switch Windows to the "balanced" power plan (index 1).
    power_config(1)
    # Make sure all output directories exist.
    check_path()
    # Generate the candidate timing plans from the timing settings.
    plans, _ = create_plans(timming, plans_file)
    # plans = read_plans(plan_file)
    print("共训练%d种方案" % len(plans))
    run(net_path, simulation, plans)
    # darw_train_record(train_file, train_test_file)
    # darw_test_record(drl_test_file)
