"""
训练模型
"""
import time

from parl.algorithms import DDQN

from Examples.DQNExamples.Dimension2D.Dim2DAgent import Dim2DAgent
from Examples.DQNExamples.EazyGame.EasyGameModel import Model
from Examples.DQNExamples.EazyGame.EasyGameRPM import RPM
from Examples.DQNExamples.EazyGame.Env2D import Env2D
from Examples.DQNExamples.EazyGame.Train2D import Training2D

MEMORY_SIZE = 2000  # capacity of the experience replay memory
BATCH_SIZE = 32  # mini-batch size sampled per learning step
GAMMA = 0.9  # reward discount factor
LEARNING_RATE = 0.0001  # learning rate (annealed after each turn in the loop below)
E_GREED = 1  # initial epsilon for epsilon-greedy exploration
E_GREED_DECREMENT = 1e-6  # per-step decay applied to epsilon
UPDATE_TARGET_STEPS = 200  # sync the target network every this many training steps
MAX_EPISODES = 90  # maximum number of episodes per turn
MAX_TURN = 1000  # number of outer training turns
MAX_STEPS = 200000  # per-episode training step cap, to bound training time
MAX_TEST_STEPS = 20000  # per-episode step cap during testing, to bound test time
MEMORY_WARMUP_SIZE = 64  # replay-memory warm-up: experiences to collect before training starts
TEST_FREQ = 100  # test frequency: evaluate once every this many episodes
FRAME_SKIPPING = 10  # sample one frame every this many frames

def _timed_build(label, factory):
    """Call *factory*, print *label* followed by the elapsed seconds, return the result."""
    begin = time.time()
    built = factory()
    print(label, time.time() - begin)
    return built


for turn in range(MAX_TURN):
    # Fresh environment for this turn.
    env = Env2D()

    # Action/observation dimensions; the observation space is a fixed 10x10 grid.
    act_dim = 4
    # obs_dim = env.observation_space.n
    obs_dim = (10, 10)

    # Experience replay pool (construction is timed and reported).
    rpm = _timed_build("经验回放池创建耗时: ",
                       lambda: RPM(MEMORY_SIZE, obs_dim, 4))

    # Q-network model (dueling architecture enabled).
    model = _timed_build("模型创建耗时: ",
                         lambda: Model(act_dim=act_dim, obs_dim=obs_dim, dueling=True))

    # Double-DQN algorithm wrapping the model.
    algorithm = _timed_build("算法创建耗时: ",
                             lambda: DDQN(model, gamma=GAMMA, lr=LEARNING_RATE))

    # Agent driving epsilon-greedy action selection and target-network syncing.
    agent = _timed_build(
        "Agent创建耗时: ",
        lambda: Dim2DAgent(algorithm, obs_dim, act_dim,
                           e_greed=E_GREED,
                           e_greed_decrement=E_GREED_DECREMENT,
                           update_target_steps=UPDATE_TARGET_STEPS))

    # Trainer object.
    train = Training2D()

    # Run one full training pass, checkpointing to the fixed model path.
    train.train(
        agent=agent,
        env=env,
        rpm=rpm,
        batch_size=BATCH_SIZE,
        max_steps=MAX_STEPS,
        memory_warmup_size=MEMORY_WARMUP_SIZE,
        max_episodes=MAX_EPISODES,
        path='model/model.ckpt',
    )

    # Anneal learning rate and exploration rate between turns (floored).
    LEARNING_RATE = max(0.00001, LEARNING_RATE * 0.8)
    E_GREED = max(0.0001, E_GREED * 0.8)
