# 网址：https://subaochen.github.io/reinforcement%20learning/2019/08/16/grid-world-value-function/
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.table import Table
from xml.dom.minidom import Document
# Grid size, entered interactively by the user (number of states per side)
WORLD_SIZE = int(input("请输入状态个数:"))
# Positions of the two terminal states (0-based indices, here and below)
A_POS = [0,0]
# Position of terminal state B (opposite corner)
B_POS = [WORLD_SIZE-1, WORLD_SIZE-1]
# Discount factor gamma
DISCOUNT = 0.9
# Action set = {up, down, left, right}, each as a coordinate offset
ACTIONS = [np.array([0, -1]),  # left
           np.array([-1, 0]),  # up
           np.array([0, 1]),   # right
           np.array([1, 0])]   # down
# Random policy: every action taken with equal probability
ACTION_PROB = 0.25

# Dump a list of strings to an XML file ("data.xml" in the current directory)
def write_datato_xml(data, name):
    """Serialize *data* to data.xml under a single <Paper> root.

    :param data: sequence of strings, one element node per item
    :param name: tag name used for every element node
    """
    dom = Document()
    # Root node <Paper> holds one child element per data item.
    paper = dom.createElement("Paper")
    dom.appendChild(paper)
    for index, item in enumerate(data):
        node = dom.createElement(name)
        paper.appendChild(node)
        # The item's text becomes the node's text content.
        node.appendChild(dom.createTextNode(item))
        # Record the item's position as an attribute for traceability.
        node.setAttribute("number", f"{index}")
    # The with-statement closes the file; an explicit close() is redundant.
    with open("data.xml", 'w', encoding='utf-8') as f:
        # indent: prefix of the root tag, addindent: per-level indent,
        # newl: string appended after each tag
        dom.writexml(f, indent='\t', newl='\n', addindent='\t')

# Plotting helper
def draw_image(image):
    """Render a 2-D array as a matplotlib table with 1-based row/column labels.

    :param image: 2-D numpy array of cell values to display
    """
    fig, ax = plt.subplots()
    ax.set_axis_off()
    tb = Table(ax, bbox=[0, 0, 1, 1])
    nrows, ncols = image.shape
    width, height = 1.0 / ncols, 1.0 / nrows
    # One table cell per array element.
    for (i, j), val in np.ndenumerate(image):
        tb.add_cell(i, j, width, height, text=val,
                    loc='center', facecolor='white')
    # Row labels (1-based) down the left edge.
    for i in range(nrows):
        tb.add_cell(i, -1, width, height, text=i + 1, loc='right',
                    edgecolor='none', facecolor='none')
    # Column labels (1-based) along the bottom edge. Use the array's own
    # nrows/ncols (the original used the global WORLD_SIZE here, which broke
    # the function for arrays of any other size).
    for j in range(ncols):
        tb.add_cell(nrows, j, width, height / 2, text=j + 1, loc='center',
                    edgecolor='none', facecolor='none')
    ax.add_table(tb)

def step(state, action):
    """Advance one step in the grid world.

    :param state: current state as a coordinate list, e.g. [1, 1]
    :param action: coordinate offset applied to the state
    :return: (next_state, reward) where next_state is a coordinate list
    """
    # Terminal states absorb: no movement, zero reward.
    if state == A_POS or state == B_POS:
        return state, 0

    candidate = (np.array(state) + action).tolist()
    row, col = candidate
    inside = 0 <= row < WORLD_SIZE and 0 <= col < WORLD_SIZE
    # Every non-terminal transition costs -1; off-grid moves bounce back.
    next_state = candidate if inside else state
    return next_state, -1.0

# Iterative policy evaluation: compute every cell's state-value function
# under the equiprobable random policy.
def grid_world_value_function():
    """Evaluate the random policy until the value table converges.

    :return: (history, value) where history maps episode -> total absolute
             error of that sweep and value is the converged value table
    """
    # Initial state-value estimates
    value = np.zeros((WORLD_SIZE, WORLD_SIZE))
    episode = 0
    history = {}
    status = []
    while True:
        episode += 1
        # Full-backup sweep: each iteration builds new_value from the
        # previous table until the two are close enough (convergence).
        new_value = np.zeros_like(value)
        for i in range(WORLD_SIZE):
            for j in range(WORLD_SIZE):
                for action in ACTIONS:
                    # Take the action, observe successor state and reward.
                    (next_i, next_j), reward = step([i, j], action)
                    # Bellman expectation equation. Each action leads to a
                    # single (s', r) pair, so p(s',r|s,a) = 1 and only the
                    # policy probability appears.
                    new_value[i, j] += ACTION_PROB * (reward + DISCOUNT * value[next_i, next_j])
        error = np.sum(np.abs(new_value - value))
        history[episode] = error
        # Stop when the total absolute change drops below 1e-4.
        if error < 1e-4:
            draw_image(np.round(new_value, decimals=2))
            # Raw string: '\p' is an invalid escape in a plain literal.
            plt.title(r'$v_{\pi}$')
            plt.show()
            plt.close()
            break
        # Record each sweep's error and value table for the XML dump.
        status.append(f"{episode}-{np.round(error, decimals=5)}:\n{np.round(new_value, decimals=2)}")
        value = new_value
    write_datato_xml(status, "grid_world_value_function")
    return history, value

# 使用iterative policy evaluation（in place）算法计算每个单元格的状态价值函数
# def grid_world_value_function_in_place():
#     # 状态价值函数的初值
#     value = np.zeros((WORLD_SIZE, WORLD_SIZE))
#     episode = 0
#     history = {}
#     while True:
#         episode = episode + 1
#         old_value = value.copy()
#         for i in range(WORLD_SIZE):
#             for j in range(WORLD_SIZE):
#                 episode_value = 0
#                 for action in ACTIONS:
#                     (next_i, next_j), reward = step([i, j], action)
#                     # bellman equation
#                     # 由于每个方向只有一个reward和s'的组合，这里的p(s',r|s,a)=1
#                     value_s_prime = value[next_i, next_j]
#                     episode_value += ACTION_PROB * (reward + DISCOUNT * value_s_prime)
#                 value[i, j] = episode_value
#         error = np.sum(np.abs(old_value - value))
#         history[episode] = error
#         if error < 1e-4:
#             draw_image(np.round(old_value, decimals=2))
#             plt.title('$v_{\pi}$')
#             plt.show()
#             plt.close()
#             break
#         # 观察每一轮次状态价值函数及其误差的变化情况
#         print(f"in place-{episode}-{np.round(error,decimals=5)}:\n{np.round(value,decimals=2)}")
#     return history, value

# Value iteration: compute the grid world's optimal value function
# (the greedy direction in each cell then gives the optimal policy).
def grid_world_optimal_policy():
    """Run value iteration until the optimal value table converges, then plot it."""
    value = np.zeros((WORLD_SIZE, WORLD_SIZE))
    episode = 0
    while True:
        episode += 1
        # Keep iterating until convergence.
        new_value = np.zeros_like(value)
        for i in range(WORLD_SIZE):
            for j in range(WORLD_SIZE):
                # Action values q(s,a) = r + gamma * v(s') for every action.
                action_values = []
                for action in ACTIONS:
                    (next_i, next_j), reward = step([i, j], action)
                    action_values.append(reward + DISCOUNT * value[next_i, next_j])
                # Bellman optimality backup: keep the best action value.
                new_value[i, j] = np.max(action_values)
        error = np.sum(np.abs(new_value - value))
        # Stop when the total absolute change drops below 1e-4.
        if error < 1e-4:
            draw_image(np.round(new_value, decimals=2))
            plt.title('$v_{*}$')
            plt.show()
            plt.close()
            break
        # Trace each sweep's error and value table.
        print(f"{episode}-{np.round(error,decimals=5)}:\n{np.round(new_value,decimals=2)}")
        value = new_value

# def get_optimal_actions(values):
#     """计算当前轮次格子的最优动作
#     :param values:格子的状态价值
#     :return: 当前的最优动作。解读这个最优动作数组，要参考ACTIONS中四个动作的方向定义，
#     数值为1表示此动作为最优动作
#     """
#     optimal_actions = np.zeros(len(ACTIONS))
#     indices = np.where(values == np.amax(values))
#     for index in indices[0]:
#         optimal_actions[index] = 1
#     return optimal_actions


def plot_his(history, title):
    """Plot the per-episode error curve of an evaluation run.

    :param history: dict mapping episode number -> error for that episode
    :param title: figure title
    """
    episodes = history.keys()
    errors = history.values()
    plt.plot(episodes, errors)
    plt.title(title)
    plt.xlabel("episode")
    plt.ylabel("error")
    # When several curves are plotted, label them for comparison.
    if len(history) != 1:
        plt.legend(["grid_world_value_function", "grid_world_value_function_in_place"])
    plt.show()


if __name__ == '__main__':

    # Policy evaluation for the equiprobable random policy.
    history1, _ = grid_world_value_function()
    # history2, _ = grid_world_value_function_in_place()
    # plot_his([history1, history2], "iterative policy evaluation error")
    plot_his(history1, "iterative policy evaluation error")
    # Value iteration for the optimal value function.
    grid_world_optimal_policy()