from model.DQN_Model import *
from env.environment import *
import matplotlib.pyplot as plt
from tqdm import tqdm
from logger import Logger
from config.config import config
from utils import *


# --- Paths: read output locations from the project config file ---
cf = config()
log_path = cf.getConfig('path', 'log_path')      # directory for .log files
pic_path = cf.getConfig('path', 'picture_path')  # directory for saved plots
mod_path = cf.getConfig('path', 'model_path')    # directory for model checkpoints

# --- Loggers: `log` records the main run, `pseudorandom_log` records
# pseudo-random action choices separately. Both are created (and written
# to) at import time as a restart marker.
log = Logger(log_path + 'run.log')
log.write("Run.py从这又重新启动一次", '')
pseudorandom_log = Logger(log_path + 'pseudorandom.log')
pseudorandom_log.write("Run.py从这又重新启动一次", '')

# --- Training hyper-parameters from config ---
TrainEpoches = cf.getConfig('train', 'TrainEpoches')  # number of training episodes
MaxStep = cf.getConfig('train', 'MaxStep')            # max environment steps per episode

def train():
    """Warm up the DQN replay memory, then run the main training loop.

    Relies on module-level globals: ``env``, ``dqn``, ``log``,
    ``pseudorandom_log``, ``TrainEpoches``, ``MaxStep``, ``pic_path`` and
    ``mod_path``. Every 50 episodes (i > 1) the loss curve, a model
    checkpoint, and an :func:`optimize` evaluation run are produced.
    """
    # --- Warm-up phase: collect transitions until the replay memory holds
    # more than 32 entries (enough for a learning batch).
    state = env.reset()
    while len(dqn.memory.memory) <= 32:
        # epoch=-1 marks warm-up action selection (exploratory/random).
        action, random_flag = dqn.choose_action(state, env, epoch=-1)
        nextState, reward, reset, _ = env.step(action)

        print("Run里面计算出来的奖赏", reward)
        if reset:  # environment crashed and needs a restart (Q value too low)
            state = env.reset()
            continue
        elif nextState and not reset:  # healthy step: env alive, no restart needed
            # Store only transitions not already present in memory.
            if not dqn.memory.IsInMemory(state=state, action=action):
                dqn.store_transition(state=state, action=action, reward=reward, next_state=nextState)
        else:  # blacklisted action or crash rollback: re-sample until a valid action
            while not nextState:
                action = dqn.RandomChooseAction()
                nextState, reward, reset, _ = env.step(action)
            if not dqn.memory.IsInMemory(state=state, action=action):
                dqn.store_transition(state=state, action=action, reward=reward, next_state=nextState)
        state = nextState

    # --- Main training loop ---
    for i in tqdm(range(TrainEpoches)):
        log.write("********训练批次为：", i)
        state = env.reset()
        scores = env.getScore_list()
        for step in range(MaxStep):
            log.write_list("当前状态为：", list=state)
            log.write_list("the scores is :", list=scores)
            action, random_flag = dqn.choose_action(state, env, epoch=i)
            log.write_list("模型计算出来的修改位是：", list=action)
            nextState, reward, reset, done = env.step(action)
            scores = env.getScore_list()
            if random_flag:
                # Pseudo-random choices are logged to a dedicated file.
                pseudorandom_log.write_list("第 %d 次伪随机修改第 %d 个寄存器，得到的Action为："%(dqn.random_time, (dqn.random_time - 1) % env.getRegisterNum()), action)
                pseudorandom_log.write_list("第 %d 次伪随机之后的BIOS设置为："%dqn.random_time, nextState)
                pseudorandom_log.write("第 %d 次伪随机之后的reward为："%dqn.random_time, reward)
            if done:
                log.write("训练中到达了目标，返回初始状态", '!')
                # Oversample the goal transition (40 copies) so the sparse
                # success signal is well represented in the replay memory.
                for _ in range(40):
                    dqn.store_transition(state=state, action=action, reward=reward, next_state=nextState)
                dqn.learn()
                break
            log.write("Run里面计算出来的奖赏", reward)
            if reset:  # environment crashed: restart and continue the episode
                log.write("宕机重启，而且返回初始状态", '!')
                state = env.reset()
                scores = env.getScore_list()
                continue
            elif nextState and not reset:  # healthy step
                # NOTE(review): unlike the warm-up phase, duplicate
                # transitions are not filtered here — confirm intended.
                dqn.store_transition(state=state, action=action, reward=reward, next_state=nextState)
            else:  # blacklisted action or crash rollback: re-sample until valid
                while not nextState:
                    log.write("训练时重新选择了动作", '!')
                    action = dqn.RandomChooseAction()
                    log.write_list("重新选择的动作的修改位是：", list=action)
                    nextState, reward, reset, _ = env.step(action)
                    log.write("重新选择动作后Run里面计算出来的奖赏", reward)
                dqn.store_transition(state=state, action=action, reward=reward, next_state=nextState)
            state = nextState
            dqn.learn()
        # Periodic checkpoint: save loss curve + model, then run an
        # evaluation/optimization pass with the current policy.
        if i % 50 == 0 and i > 1:
            plt.plot(range(len(dqn.LossList)), dqn.LossList)
            plt.savefig(pic_path + 'loss_curve.png', bbox_inches='tight')
            dqn.SaveModel(mod_path + '/DqnEvalModel_%d.pkl'%i)
            plt.close()
            optimize(optimize_step=20, epoch=i)

def optimize(optimize_step = 10, epoch = 0):
    """Run the trained policy greedily and plot the resulting score curves.

    Steps the environment up to ``optimize_step`` times with greedy action
    selection (epsilon=1.0), tracking the total score and the last four
    individual score entries, then saves two curve plots labeled by
    ``epoch``. Uses module-level globals ``env``, ``dqn``, ``log`` and
    ``pic_path``.

    Args:
        optimize_step: maximum number of greedy optimization steps.
        epoch: training episode index used to label the output images.
    """
    log.write("开始执行optimize函数，当前epoch为：", epoch)
    state = env.reset()
    scores = env.getScore_list()
    ScoreList = [sum(scores[-4:])]
    # Per-metric traces of the last four score entries (plotted at the end).
    metric_curves = [[scores[k]] for k in range(-4, 0)]
    for _ in range(optimize_step):
        log.write_list("当前环境env状态为：", list=state)
        log.write_list("the scores is :", list=scores)
        # BUGFIX: the original call omitted `env`, unlike both other
        # choose_action call sites; epsilon=1.0 forces greedy selection.
        action, _ = dqn.choose_action(state, env, epoch=-1, epsilon=1.0)
        log.write_list("模型选择出来的动作修改位是：", list=action)
        nextState, reward, reset, done = env.step(action)
        scores = env.getScore_list()
        log.write("Run里面计算出来的奖赏:", reward)
        if done:
            # NOTE(review): on success the scores are sliced from nextState
            # rather than env.getScore_list() — confirm the state's tail
            # actually holds the four score values.
            log.write("优化到目标分数：", sum(nextState[-4:]))
            ScoreList.append(sum(nextState[-4:]))
            for curve, value in zip(metric_curves, nextState[-4:]):
                curve.append(value)
            break
        if not nextState:  # crash during optimization: stop early
            log.write("优化过程中出现了宕机", '!!')
            break
        score = sum(scores[-4:])
        log.write("到达了跑分：", score)
        ScoreList.append(score)
        for curve, value in zip(metric_curves, scores[-4:]):
            curve.append(value)
        state = nextState
    # Total-score curve.
    plt.plot(range(len(ScoreList)), ScoreList)
    plt.savefig(pic_path + 'optimize_curve_%d.png'%epoch, bbox_inches='tight')
    plt.close()
    # Per-metric curves, labeled "1".."4" as in the original plot.
    for idx, curve in enumerate(metric_curves, start=1):
        plt.plot(range(len(curve)), curve, label=str(idx))
    plt.legend()
    plt.savefig(pic_path + '各项指标下的分数优化曲线_%d.png' % epoch, bbox_inches ='tight')
    plt.close()
    log.write("optimize函数执行结束，绘制完了优化曲线_", epoch)


if __name__ == "__main__":
    # Build the environment; `env` is read as a module-level global by
    # train() and optimize().
    env = Env()
    state_size = env.getStateLen()
    
    # The DQN is sized so N_ACTIONS == N_STATES == the environment's state
    # length. NOTE(review): semantics of N_ACTIONS inferred from usage —
    # confirm against DQN_Model.
    dqn = DQN(N_STATES=state_size, N_ACTIONS=state_size)
    
    train()
      