import gym
from parl.algorithms import DDQN

from Examples.DQNExamples.Dimension2D.Dim2DAgent import Dim2DAgent
from Examples.DQNExamples.Dimension2D.Dim2DModel import Dim2DModel
from Examples.DQNExamples.Dimension2D.Dim2DRPM import Dim2DReplayMemory
from Examples.DQNExamples.Dimension2D.Dim2DTraining import Dim2DTraining

MEMORY_SIZE: int = 2000  # capacity of the replay memory; larger uses more RAM
BATCH_SIZE: int = 32  # number of transitions sampled from replay memory per learn step
GAMMA: float = 0.99  # reward discount factor, typically 0.9–0.999
LEARNING_RATE: float = 0.0001  # optimizer learning rate
E_GREED: float = 0.5  # initial epsilon for epsilon-greedy exploration
E_GREED_DECREMENT: float = 1e-7  # epsilon decay applied as training steps accumulate
UPDATE_TARGET_STEPS: int = 200  # sync the target Q network every this many steps
MAX_EPISODES: int = 10000  # maximum number of training episodes
MAX_STEPS: int = 2000  # per-episode step cap during training, to bound runtime
MAX_TEST_STEPS: int = 20000  # per-episode step cap during testing, to bound runtime
MEMORY_WARMUP_SIZE: int = 200  # minimum transitions stored before learning starts
TEST_FREQ: int = 100  # run an evaluation every this many training episodes
FRAME_SKIPPING: int = 1  # sample one frame out of every FRAME_SKIPPING frames
RENDER: bool = True  # render the environment during training
TEST_RENDER: bool = False  # render the environment during testing
TARGET_UPDATE_FREQ: int = 5  # update target_net every this many learn calls
LOAD_MODEL: bool = True  # load model weights from SAVE_PATH before running
SAVE_MODEL: bool = True  # save model weights during/after training
SAVE_PATH: str = './model/break_out.ckpt'  # checkpoint path for the model

# --- Environment setup --------------------------------------------------
# Breakout without built-in frame skipping. A human-visible window is
# requested only when training rendering is enabled.
env = (
    gym.make('BreakoutNoFrameskip-v0', render_mode='human')
    if RENDER
    else gym.make('BreakoutNoFrameskip-v0')
)

# Shapes of the observation space and size of the discrete action space.
obs_dim = env.observation_space.shape
act_dim = env.action_space.n

# Experience replay buffer for DQN: capacity, observation shape, and the
# action dimension (0 here because the action space is discrete).
rpm = Dim2DReplayMemory(MEMORY_SIZE, obs_dim, 0)

# Q-network: action dimension first, observation dimension second.
model = Dim2DModel(act_dim, obs_dim)

# Double DQN algorithm wrapping the model with discount and learning rate.
algorithm = DDQN(model, gamma=GAMMA, lr=LEARNING_RATE)

# Agent that acts epsilon-greedily, decays epsilon over time, and
# periodically syncs the target network.
agent = Dim2DAgent(
    algorithm,
    obs_dim,
    act_dim,
    update_target_steps=UPDATE_TARGET_STEPS,
    e_greed=E_GREED,
    e_greed_decrement=E_GREED_DECREMENT,
)
train = Dim2DTraining()

# Training is currently disabled; uncomment to train before evaluating.
# train.train(env, agent, rpm,
#             max_episodes=MAX_EPISODES,
#             max_steps=MAX_STEPS,
#             batch_size=BATCH_SIZE,
#             max_test_steps=MAX_TEST_STEPS,
#             memory_warmup_size=MEMORY_WARMUP_SIZE,
#             target_update_freq=TARGET_UPDATE_FREQ,
#             render=RENDER,
#             test_render=TEST_RENDER,
#             save_path=SAVE_PATH,
#             load_model=LOAD_MODEL,
#             test_freq=TEST_FREQ, )

# Evaluate the (optionally loaded) agent only.
train.test(
    env,
    agent,
    max_steps=MAX_TEST_STEPS,
    render=TEST_RENDER,
    load_model=LOAD_MODEL,
    save_path=SAVE_PATH,
)
