from collections import deque
import os
import random
from tqdm import tqdm

import torch

from utils_drl import Agent
from utils_env import MyEnv
from utils_memory import ReplayMemory


GAMMA = 0.99                # discount factor for future rewards
GLOBAL_SEED = 0             # master seed for all random number generators
MEM_SIZE = 100_000          # replay-memory capacity (transitions)
RENDER = True               # save evaluation frames (later composed into a video)
SAVE_PREFIX = "./models"    # directory for model checkpoints
STACK_SIZE = 4              # number of frames stacked into one state

EPS_START = 0.7             # initial epsilon for epsilon-greedy exploration
EPS_END = 0.05              # final epsilon after annealing
EPS_DECAY = 1_000_000       # steps over which epsilon is annealed to EPS_END

BATCH_SIZE = 32             # minibatch size for each learning update
POLICY_UPDATE = 4           # policy-network update frequency (steps)
TARGET_UPDATE = 10_000      # target-network sync frequency (steps)
WARM_STEPS = 50_000         # transitions collected before learning starts
MAX_STEPS = 10_000_000      # total environment steps for this run
EVALUATE_FREQ = 50_000      # steps between evaluations / checkpoints

# Master RNG from which all sub-seeds are drawn, keeping runs reproducible.
rand = random.Random(GLOBAL_SEED)


def new_seed() -> int:
    """Draw a fresh sub-seed from the master RNG."""
    return rand.randint(0, 1_000_000)
# Create the checkpoint directory; exist_ok prevents a FileExistsError
# when the script is re-run and the directory is already there
# (plain os.mkdir would crash on the second run).
os.makedirs(SAVE_PREFIX, exist_ok=True)

torch.manual_seed(new_seed())  # seed torch for reproducibility
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = MyEnv(device)
# DQN agent with epsilon-greedy exploration annealed from EPS_START
# down to EPS_END over EPS_DECAY steps.
agent = Agent(
    env.get_action_dim(),
    device,
    GAMMA,
    new_seed(),
    EPS_START,
    EPS_END,
    EPS_DECAY,
    restore="./model_019"  # NOTE(review): hard-coded checkpoint path — confirm it exists before running
)
# Experience replay buffer; STACK_SIZE + 1 frames per entry so that both
# the state and the successor state can be reconstructed from one record.
memory = ReplayMemory(STACK_SIZE + 1, MEM_SIZE, device)

#### Training ####
# Sliding window of the most recent raw observations; STACK_SIZE + 1
# frames are kept so a folded (state, next_state) pair can be built.
obs_queue: deque = deque(maxlen=STACK_SIZE + 1)
done = True  # force an environment reset on the first iteration

progressive = tqdm(range(MAX_STEPS), total=MAX_STEPS,
                   ncols=50, leave=False, unit="b")  # progress bar
for step in progressive:
    if done:
        # Episode ended (or an evaluation just ran): reset the environment
        # and refill the observation queue with the initial frames.
        observations, _, _ = env.reset()
        for obs in observations:
            obs_queue.append(obs)

    # Learning only starts once the replay memory holds enough warm-up data.
    training = len(memory) > WARM_STEPS
    state = env.make_state(obs_queue).to(device).float()
    action = agent.run(state, training)
    obs, reward, done = env.step(action)
    obs_queue.append(obs)
    # Store the transition (folded state, action, reward, done) in replay memory.
    memory.push(env.make_folded_state(obs_queue), action, reward, done)

    # Update the policy network every POLICY_UPDATE steps once training began.
    if step % POLICY_UPDATE == 0 and training:
        agent.learn(memory, BATCH_SIZE)

    # Periodically sync the (fixed) target network with the policy network.
    if step % TARGET_UPDATE == 0:
        agent.sync()

    # Periodic evaluation: log the average reward and checkpoint the model.
    if step % EVALUATE_FREQ == 0:
        avg_reward, frames = env.evaluate(obs_queue, agent, render=RENDER)
        with open("rewards.txt", "a") as fp:
            fp.write(f"{step//EVALUATE_FREQ:3d} {step:8d} {avg_reward:.1f}\n")
        if RENDER:
            # Dump each rendered frame as a PNG for later video composition.
            prefix = f"eval_{step//EVALUATE_FREQ:03d}"
            # exist_ok so a resumed run does not crash on a pre-existing
            # evaluation directory (os.mkdir raised FileExistsError here).
            os.makedirs(prefix, exist_ok=True)
            for ind, frame in enumerate(frames):
                with open(os.path.join(prefix, f"{ind:06d}.png"), "wb") as fp:
                    frame.save(fp, format="png")
        agent.save(os.path.join(
            SAVE_PREFIX, f"model_{step//EVALUATE_FREQ:03d}"))
        done = True  # evaluation consumed the obs queue; reset next step