# -*- coding: UTF-8 -*-
"""
Sarsa is an online updating method for reinforcement learning.

Unlike Q-learning, which is an offline updating method,
Sarsa updates while following the current trajectory.

You will see that Sarsa is more cautious when punishment is close,
because it accounts for all behaviours,
while Q-learning is bolder because it only cares about the maximum-value behaviour.
"""

from maze_env import Maze
from RL_brain import SarsaTable
from RL_brain import QLearningTable
from RL_brain import SarsaLambdaTable

def Sarsaupdate():
    """Train the agent with the on-policy Sarsa update rule.

    Reads the module-level globals ``env`` (the Maze environment) and
    ``RL`` (a SarsaTable or SarsaLambdaTable). Runs 1000 episodes, then
    closes the environment window.
    """
    for episode in range(1000):
        print('\n', episode, ':\n')
        print('RL.q_table:\n', RL.q_table)
        # Only SarsaLambdaTable carries an eligibility trace; the plain
        # SarsaTable setup (see the commented-out lines in __main__) does
        # not, so guard the debug print instead of raising AttributeError.
        trace = getattr(RL, 'eligibility_trace', None)
        if trace is not None:
            print('\nRL.eligibility_trace:\n', trace)

        # Reset the environment and get the initial state observation.
        observation = env.reset()

        # Sarsa chooses the first action from the initial state.
        action = RL.choose_action(str(observation))

        while True:
            # Redraw the environment.
            env.render()

            # Take the action; receive next state (observation_), reward,
            # and the terminal flag.
            observation_, reward, done = env.step(action)

            # Choose the NEXT action from the next state -- this is the
            # difference from Q-learning (which has no action_next).
            action_ = RL.choose_action(str(observation_))

            # Learn from the full (s, a, r, s', a') transition ==> Sarsa.
            RL.learn(str(observation), action, reward, str(observation_), action_)

            # The next state/action become the current ones.
            observation = observation_
            action = action_

            # Episode ends on a terminal state.
            if done:
                break

    # All episodes finished.
    print('game over')
    env.destroy()


def QLearningupdate():
    """Train the agent with off-policy Q-learning for 1000 episodes.

    Reads the module-level globals ``env`` (the Maze environment) and
    ``RL`` (a QLearningTable). Prints the learned Q-table and closes
    the environment window when training ends.
    """
    for episode in range(1000):
        # Start a fresh episode: reset the maze and read the initial
        # state observation (e.g. the moving dot's position).
        state = env.reset()

        done = False
        while not done:
            # Refresh the on-screen environment.
            env.render()

            # The agent picks an action based on the current state.
            chosen = RL.choose_action(str(state))

            # Apply the action; the environment returns the successor
            # state, the reward, and whether the episode has terminated
            # (fell into hell or reached heaven).
            next_state, reward, done = env.step(chosen)

            # Q-learning updates from (state, action, reward, state_)
            # only -- no next action is needed, unlike Sarsa.
            RL.learn(str(state), chosen, reward, str(next_state))

            # Carry the successor state into the next iteration.
            state = next_state

    # Training finished: report, dump the Q-table, and close the window.
    print('game over')
    print(RL.q_table)
    env.destroy()

if __name__ == "__main__":
    env = Maze()
    # RL = SarsaTable(actions=list(range(env.n_actions)))
    # env.after(100, Sarsaupdate)
    # # RL = QLearningTable(actions=list(range(env.n_actions)))
    # # env.after(100, QLearningupdate)

    RL = SarsaLambdaTable(actions=list(range(env.n_actions)))
    env.after(100, Sarsaupdate)

    env.mainloop()