import gym
from DQN import DeepQNetwork

def runmaze(n_episodes=100, learn_start=1000):
    """Train the module-level DQN agent ``RL`` on the module-level ``env``.

    Runs ``n_episodes`` episodes; on every step the agent picks an action,
    the transition is stored in replay memory, and learning begins once
    more than ``learn_start`` total steps have been taken.

    Args:
        n_episodes: number of episodes to run (default 100, as before).
        learn_start: number of transitions to accumulate before the first
            call to ``RL.learn()`` (default 1000, as before).

    Note: relies on the globals ``env`` and ``RL`` created under
    ``if __name__ == "__main__":``.
    """
    total_steps = 0
    for i_episode in range(n_episodes):

        observation = env.reset()
        ep_r = 0
        while True:
            # Render the environment for visualization.
            env.render()
            # DQN chooses an action based on the current observation.
            action = RL.choose_action(observation)
            # Environment returns the next state, reward, and terminal flag
            # (classic gym 4-tuple step API).
            observation_, reward, done, info = env.step(action)
            # Store the transition in replay memory.
            RL.store_transition(observation, action, reward, observation_)
            ep_r += reward
            # Count every transition, including the terminal one.
            # (Previously the increment came after `break`, so terminal
            # steps were never counted toward the learn-start threshold.)
            total_steps += 1
            # Delay learning until enough memories have accumulated.
            if total_steps > learn_start:
                RL.learn()

            if done:
                print('episode: ', i_episode,
                      'ep_r: ', round(ep_r, 2),
                      ' epsilon: ', round(RL.epsilon, 2))
                break
            # The next state becomes the current state for the next loop.
            observation = observation_


if __name__ == "__main__":
    # Build the Acrobot environment; `env` must stay a module-level global
    # because runmaze() reads it directly.
    env = gym.make('Acrobot-v1')
    # Agent hyperparameters, gathered in one place for readability.
    agent_config = dict(
        n_actions=env.action_space.n,
        n_features=env.observation_space.shape[0],
        learning_rate=0.01,
        reward_decay=0.9,
        e_greedy=0.9,
        replace_target_iter=200,  # sync target_net parameters every 200 steps
        memory_size=2000,         # replay-memory capacity
        # output_graph=True       # whether to write a tensorboard file
    )
    RL = DeepQNetwork(**agent_config)
    runmaze()
    RL.plot_cost()  # plot the network's training-cost curve