"""
训练模型
"""
import time
import traceback

from parl.algorithms import DDQN

from Examples.DQNExamples.HollowKnight.Agent import Agent
from Examples.DQNExamples.HollowKnight.HKEnv import HKEnv
from Examples.DQNExamples.HollowKnight.Model import Model
from Examples.DQNExamples.HollowKnight.RPM import RPM
from Examples.DQNExamples.HollowKnight.Train import Training

MEMORY_SIZE = 10000  # Capacity of the experience replay memory
BATCH_SIZE = 32  # Mini-batch size sampled per learning step
GAMMA = 0.9  # Reward discount factor
LEARNING_RATE = 0.00001  # Optimizer learning rate
E_GREED = 0.1  # Initial epsilon for epsilon-greedy exploration
E_GREED_DECREMENT = 5e-6  # Per-step decay applied to epsilon
UPDATE_TARGET_STEPS = 200  # Steps between target-network synchronizations
MAX_EPISODES = 10000  # Maximum number of training episodes
MAX_STEPS = 20000  # Max steps per training episode, to bound episode length
MAX_TEST_STEPS = 20000  # Max steps per evaluation episode, to bound episode length
MEMORY_WARMUP_SIZE = 640  # Experiences to pre-collect before learning starts
TEST_FREQ = 20  # Evaluation frequency: run a test every TEST_FREQ episodes
FRAME_SKIPPING = 1  # Sample one frame every FRAME_SKIPPING frames
TARGET_UPDATE_FREQ = 5  # Update target_net once every TARGET_UPDATE_FREQ training steps
DUELING = True  # Whether to use a dueling network architecture


def _timed(label, factory):
    """Build a component via ``factory``, print ``label`` and the elapsed
    construction time in seconds, and return the built object.

    Replaces the four copy-pasted start/end/print timing snippets.
    """
    start = time.time()
    obj = factory()
    # Two-argument print matches the original output format exactly.
    print(label, time.time() - start)
    return obj


# Create the Hollow Knight environment.
env = HKEnv()

# Action and observation dimensions for this environment.
# NOTE(review): hard-coded here rather than queried from env — confirm they
# match HKEnv's actual spaces.
act_dim = 12
obs_dim = (1, 8)

# Experience replay memory (fixed typo: was `rmp`).
rpm = _timed("经验回放池创建耗时: ", lambda: RPM(MEMORY_SIZE, obs_dim, 4))

# Q-network model. Bug fix: pass the DUELING constant instead of a
# hard-coded True, so flipping DUELING actually takes effect.
model = _timed(
    "模型创建耗时: ",
    lambda: Model(act_dim=act_dim, obs_dim=obs_dim, dueling=DUELING),
)

# Double-DQN algorithm wrapping the model.
algorithm = _timed(
    "算法创建耗时: ",
    lambda: DDQN(model, gamma=GAMMA, lr=LEARNING_RATE),
)

# Agent that performs epsilon-greedy exploration and learning.
agent = _timed(
    "Agent创建耗时: ",
    lambda: Agent(algorithm, obs_dim, act_dim,
                  e_greed=E_GREED,
                  e_greed_decrement=E_GREED_DECREMENT,
                  update_target_steps=UPDATE_TARGET_STEPS),
)

# Training loop driver.
train = Training()

# Run the training loop; the checkpoint is saved to `path`.
train.train(
    agent=agent,
    env=env,
    rpm=rpm,
    test_freq=TEST_FREQ,
    batch_size=BATCH_SIZE,
    max_steps=MAX_STEPS,
    memory_warmup_size=MEMORY_WARMUP_SIZE,
    max_episodes=MAX_EPISODES,
    path='model/大飞蛾_dueling.ckpt'
)
