"""
A simple example for Reinforcement Learning using table lookup Q-learning method.
An agent "o" is on the left of a 1 dimensional world, the treasure is on the rightmost location.
Run this program to see how the agent improves its strategy for finding the treasure.

View more on my tutorial page: https://morvanzhou.github.io/tutorials/
"""

import numpy as np
# import pandas as pd  # not needed: plain numpy is much faster than pandas for this small table
import time

np.random.seed(2)  # reproducible: fix the RNG so every run produces the same trajectory

CYCLE =2000 # debugging knob: Q-table progress is printed every CYCLE episodes


N_STATES = 6   # length of the 1-dimensional world (states 0 .. N_STATES-1)
ACTIONS = [0, 1]     # 0: move left; 1: move right
#ACTIONS = ['left', 'right']     # original string-based action set, kept for reference
EPSILON = 0.9   # greedy policy: act greedily 90% of the time, explore randomly 10%
ALPHA = 0.1     # learning rate; kept small, presumably to avoid oscillation while the Q-table matures
GAMMA = 0.9    # discount factor: decay applied to future reward, so values fall off with distance from the goal
MAX_EPISODES = 10*CYCLE   # total number of training episodes
FRESH_TIME = 0.01    # pause per move so the rendering is watchable (used with update_env)

Terminal = N_STATES - 1 # index of the goal (terminal) state, rightmost cell


def build_q_table(n_states, actions):
    """Return a fresh all-zero Q-table of shape (n_states, len(actions)).

    Rows index states, columns index actions; each cell holds the learned
    estimate of discounted future reward for taking that action in that state.
    (Removed dead commented-out pandas-era code: `table.iloc[...]` would
    raise AttributeError on a numpy array.)
    """
    return np.zeros((n_states, len(actions)))


def choose_action(state, q_table):
    """Pick the next action for *state* with an epsilon-greedy policy.

    With probability 1-EPSILON, or when the state's action values are all
    equal (e.g. an untrained all-zero row), choose uniformly at random;
    otherwise act greedily, breaking exact-value ties at random.
    """
    state_actions = q_table[state]
    # np.all(...) generalizes the original two-action tie test
    # (state_actions[0] == state_actions[1]) to any number of actions;
    # identical behavior for the current two-element ACTIONS.
    if (np.random.uniform() > EPSILON) or np.all(state_actions == state_actions[0]):
        action_name = np.random.choice(ACTIONS)  # explore
    else:
        # Greedy: pick randomly among all actions sharing the maximum value.
        best = np.where(state_actions == np.max(state_actions))[0]
        action_name = ACTIONS[np.random.choice(best)]
    return action_name


def get_env_feedback(S, A):
    """Apply action A in state S and return (next_state, reward).

    Reward is 10 only when a rightward step lands on the terminal state;
    every other transition yields 0. A leftward step from state 0 hits the
    wall and stays in place.
    """
    if A == 1:  # step right
        nxt = S + 1
        reward = 10 if nxt == Terminal else 0
    else:  # step left, clamped at the wall (state 0)
        nxt = max(S - 1, 0)
        reward = 0
    return nxt, reward


def update_env(S, episode, step_counter):
    """Render the 1-D world (agent 'o', treasure 'T') or an episode summary.

    When S is the string 'terminal', prints the finished episode's step
    count; otherwise prints the world row '----o----T' in place.
    """
    if S == 'terminal':
        interaction = '\nEpisode %s: total_steps = %s \n' % (episode+1, step_counter)
        print(interaction)
    else:
        world = ['-'] * (N_STATES - 1) + ['T']
        world[S] = 'o'
        print(''.join(world), end='')


def rl():
    """Main Q-learning loop.

    Trains for MAX_EPISODES episodes, updating the Q-table with the
    standard tabular rule Q[s,a] += ALPHA * (target - Q[s,a]), and returns
    the learned table. Every CYCLE episodes a debug snapshot is printed.
    """
    q_table = build_q_table(N_STATES, ACTIONS)
    for episode in range(MAX_EPISODES):
        step_counter = 0
        state = 0          # agent always starts at the leftmost cell
        done = False

        while not done:
            action = choose_action(state, q_table)
            next_state, reward = get_env_feedback(state, action)
            q_predict = q_table[state][action]

            # TD target: bootstrap from the best next-state value unless
            # the episode is over, in which case the reward stands alone.
            if next_state == Terminal:
                q_target = reward
                done = True
            else:
                q_target = reward + GAMMA * np.max(q_table[next_state])

            q_table[state][action] += ALPHA * (q_target - q_predict)
            state = next_state
            step_counter += 1

        if episode % CYCLE == 0:  # periodic training diagnostics
            print('\nepisode: %d,step_counter: %d:,R: %d.'%(episode+1,step_counter,reward))
            print(q_table)
    return q_table


if __name__ == "__main__":
    Q_table = rl()
