# -*- coding: utf-8 -*-
# May Saint Diana bless your coding!
# Created Time : 2021/12/6 21:08
# Author:Zhou Chenghao

import numpy as np
import pandas as pd
import time

N_STATES = 6  # width of the 1-D world
ACTIONS = ['left', 'right']  # actions available to the explorer
EPSILON = 0.9  # greediness (probability of exploiting the best known action)
ALPHA = 0.1  # learning rate
GAMMA = 0.9  # reward discount factor
MAX_EPISODES = 10  # maximum number of episodes
FRESH_TIME = 0.1  # pause between moves, in seconds
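# The world is a one-line corridor rendered as '-----T': the explorer 'o'
# starts at the leftmost cell (state 0) and the treasure 'T' sits at the
# rightmost cell (state N_STATES - 1).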


def build_q_table(n_states, actions):
    """
    建立Q表
    :param n_states: 状态数量
    :param actions:  动作
    :return:
    """
    table = pd.DataFrame(
        np.zeros((n_states, len(actions))),  # q_table 全 0 初始
        columns=actions,  # columns 对应的是行为名称
    )
    return table


# q_table:
"""
   left  right
0   0.0    0.0
1   0.0    0.0
2   0.0    0.0
3   0.0    0.0
4   0.0    0.0
5   0.0    0.0
"""


def choose_action(state, q_table):
    """
    在某个 state 地点, 选择行为
    :param state:   状态，即地点
    :param q_table: q表
    :return:  返回下一个动作
    """
    state_actions = q_table.iloc[state, :]  # 选出这个 state 的所有 action 值
    if (np.random.uniform() > EPSILON) or (state_actions.all() == 0):  # 非贪婪 or 或者这个 state 还没有探索过
        action_name = np.random.choice(ACTIONS)
    else:
        action_name = state_actions.idxmax()  # 贪婪模式 选择最大值
    return action_name
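
# At an unexplored state every Q-value is still 0, so the (state_actions == 0).all()
# check falls back to a uniformly random action; otherwise a random action is
# taken only when np.random.uniform() > EPSILON, i.e. about 10% of the time here.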


def get_env_feedback(S, A):
    """
    和环境交互，获得奖励值
    :param S: 当前的状态
    :param A: 采取的动作
    :return: 返回下一个状态和这个动作的奖励
    """
    # This is how agent will interact with the environment
    if A == 'right':  # move right
        if S == N_STATES - 2:  # terminate
            S_ = 'terminal'
            R = 1
        else:
            S_ = S + 1
            R = 0
    else:  # move left
        R = 0
        if S == 0:
            S_ = S  # reach the wall
        else:
            S_ = S - 1
    return S_, R
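
# Example transitions with N_STATES = 6:
#   get_env_feedback(4, 'right') -> ('terminal', 1)  # reached the treasure
#   get_env_feedback(2, 'right') -> (3, 0)
#   get_env_feedback(0, 'left')  -> (0, 0)           # bumped into the wall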


def update_env(S, episode, step_counter):
    """
    更新环境
    :param S:  当前状态
    :param episode: 游戏进行的轮数
    :param step_counter: 这一轮走了几步
    :return:
    """
    # This is how environment be updated
    env_list = ['-'] * (N_STATES - 1) + ['T']  # '---------T' our environment
    if S == 'terminal':
        interaction = 'Episode %s: total_steps = %s' % (episode + 1, step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r                                ', end='')
    else:
        env_list[S] = 'o'
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')
        time.sleep(FRESH_TIME)
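
# For example, S = 2 renders as '--o--T'; the '\r' carriage return redraws the
# same console line, which is what animates the explorer's movement.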


def rl():
    """
    运行强化学习 主程序
    :return:
    """
    q_table = build_q_table(N_STATES, ACTIONS)  # initialize the q_table
    for episode in range(MAX_EPISODES):  # episode loop
        # print("Episode {0}:".format(episode + 1))
        step_counter = 0
        S = 0  # starting position of the episode
        is_terminated = False  # whether this episode has ended
        update_env(S, episode, step_counter)  # render the environment
        while not is_terminated:
            A = choose_action(S, q_table)  # pick an action from the Q-table given the current state
            S_, R = get_env_feedback(S, A)  # take action A; get the reward R and the next state S_
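            # Tabular Q-learning update:
            #   Q(S, A) <- Q(S, A) + ALPHA * (R + GAMMA * max_a Q(S_, a) - Q(S, A))
            # q_predict is the current estimate Q(S, A); q_target is the
            # bootstrapped target, which collapses to just R at the terminal step.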
            q_predict = q_table.loc[S, A]  # estimated (state, action) value from the Q-table
            if S_ != 'terminal':
                q_target = R + GAMMA * q_table.iloc[S_, :].max()  # realized (state, action) value (episode not over)
            else:
                q_target = R  # realized (state, action) value (episode over)
                is_terminated = True  # terminate this episode
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update the q_table
            S = S_  # move the explorer to the next state
            update_env(S, episode, step_counter + 1)  # render the environment
            step_counter += 1
    return q_table


if __name__ == "__main__":
    q_table = rl()
    print('\r\nQ-table:\n')
    print(q_table)
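    # Rough expectation, not guaranteed for every random seed: after training,
    # the 'right' column dominates and q_table.loc[4, 'right'] ends up largest,
    # since stepping right from state 4 earns the only non-zero reward.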
