import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.colors import ListedColormap

# The eight movement directions: four cardinal plus four diagonal moves.
# Q-value arrays elsewhere in the file are indexed in this order.
actions = ['up', 'down', 'left', 'right', 'upleft', 'upright', 'downleft', 'downright']
# 10x10 display grid and colormap; not used by the active training loop.
grid = np.zeros((10, 10))
cmap = ListedColormap(['white', 'red', 'green', 'blue'])
# Epsilon-greedy threshold: exploit the best known action with probability
# `rd`, explore randomly otherwise. (The original comment said 0.8, but the
# actual value is 0.9.)
rd = 0.9


def cerateQtable():
    """Build a fresh Q-table for the 100x100 grid.

    Returns a dict mapping each (row, col) state to its own zeroed
    float array with one slot per action in `actions` (8 entries).
    """
    # Each state gets a distinct array so per-state updates never alias.
    return {(row, col): np.zeros(8)
            for row in range(100)
            for col in range(100)}


def chooseAction(state, qtable):
    """Epsilon-greedy action selection.

    Explores (uniform random action) with probability 1 - rd, or whenever
    the state's Q-values are all zero (i.e. the state is untrained);
    otherwise exploits the action with the highest Q-value.
    """
    q_values = qtable[state]
    explore = np.random.uniform() > rd
    untrained = not q_values.any()  # all-zero row: no information yet
    if explore or untrained:
        return np.random.choice(actions)
    return actions[q_values.argmax()]


# 创建一个二维的迷宫
def createMaze():
    """Return a fresh, empty 10x10 maze (all cells zero)."""
    return np.full((10, 10), 0.0)


maze = createMaze()


# 选择下一个状态
def get_env_feedback(S, A):
    s_, r = S, 0
    if S == (99, 99):
        return 'terminal', 100
    if A == 'up':
        if S[0] == 0:
            pass
        else:
            s_ = (S[0] - 1, S[1])
    if A == 'down':
        if S[0] == 99:
            pass
        else:
            s_ = (S[0] + 1, S[1])
    if A == 'left':
        if S[1] == 0:
            pass
        else:
            s_ = (S[0], S[1] - 1)
    if A == 'right':
        if S[1] == 99:
            pass
        else:
            s_ = (S[0], S[1] + 1)
    if A == 'upleft':
        if S[0] == 0 or S[1] == 0:
            pass
        else:
            s_ = (S[0] - 1, S[1] - 1)
    if A == 'upright':
        if S[0] == 0 or S[1] == 99:
            pass
        else:
            s_ = (S[0] - 1, S[1] + 1)
    if A == 'downleft':
        if S[0] == 99 or S[1] == 0:
            pass
        else:
            s_ = (S[0] + 1, S[1] - 1)
    if A == 'downright':
        if S[0] == 99 or S[1] == 99:
            pass
        else:
            s_ = (S[0] + 1, S[1] + 1)

    # 计算曼哈顿距离
    # if (abs(S[0] - 99) + abs(S[1] - 99)) != 0:
    #     r = 1 / (abs(S[0] - 99) + abs(S[1] - 99))
    # 计算欧式距离
    if (abs(S[0] - 99) ** 2 + abs(S[1] - 99) ** 2) != 0:
        r = 5 / (abs(S[0] - 99) ** 2 + abs(S[1] - 99) ** 2)
    return s_, r


def update_env(S, episode, step_counter):
    """Report training progress.

    Prints a one-line summary when the agent reaches the goal cell
    (99, 99); otherwise does nothing. (A live-rendering variant existed in
    the original but was disabled.)
    """
    if S == (99, 99):
        print('Episode %s: total_steps = %s' % (episode + 1, step_counter))


# Trace of states visited (filled by chooseBestAction / the final rl episode).
path = []


# Follow the trained Q-table greedily and record the resulting route.
def chooseBestAction(state, qtable):
    """Walk the greedy policy from `state`, appending every visited state
    to the global `path`, until the goal (99, 99) is reached or a step
    budget is exhausted.

    Fixes vs. the original implementation:
    - All 8 actions are handled; the original ignored the four diagonal
      moves, so whenever argmax picked one the state never changed and the
      recursion ran until RecursionError.
    - The terminal check happens before indexing the Q-table, so a (99, 99)
      entry is no longer required.
    - Iteration replaces recursion, avoiding RecursionError on long paths,
      and a step budget guards against cycles in an untrained table.
    """
    # (d_row, d_col) per action, indexed in the same order as the
    # module-level `actions` list: up, down, left, right, upleft, upright,
    # downleft, downright.
    moves = [(-1, 0), (1, 0), (0, -1), (0, 1),
             (-1, -1), (-1, 1), (1, -1), (1, 1)]
    for _ in range(10000):  # budget: ample for any simple path on 100x100
        path.append(state)
        if state == (99, 99):
            return
        d_row, d_col = moves[qtable[state].argmax()]
        row, col = state[0] + d_row, state[1] + d_col
        if 0 <= row <= 99 and 0 <= col <= 99:
            state = (row, col)


def rl(episodes=50000):
    """Train a Q-table with tabular Q-learning on the 100x100 grid.

    episodes: number of training episodes (default 50000, preserving the
    original hard-coded value). During the final episode the exploration
    rate is raised to rd = 0.95 (more exploitation) and every visited
    state is appended to the global `path` for later plotting.

    Returns the trained Q-table dict.
    """
    global rd
    alpha = 0.1  # learning rate
    gamma = 0.9  # discount factor
    qTable = cerateQtable()
    for episode in range(episodes):
        # Bug fix: the original tested `episode == 50000`, which is never
        # true inside range(50000), so the rd bump was dead code. Trigger
        # it on the last episode, matching the path-recording condition.
        last_episode = episode == episodes - 1
        if last_episode:
            rd = 0.95
        step_counter = 0
        S = (0, 0)
        is_terminated = False
        update_env(S, episode, step_counter)
        while not is_terminated:
            if last_episode:
                path.append(S)
            A = chooseAction(S, qTable)
            S_, R = get_env_feedback(S, A)
            # The shaped reward from get_env_feedback is deliberately
            # discarded in favor of a constant -1 step cost (shortest-path
            # objective) — TODO confirm this is still intended.
            R = -1
            q_predict = qTable[S][actions.index(A)]
            if S_ != 'terminal':
                q_target = R + gamma * qTable[S_].max()
            else:
                q_target = R
                is_terminated = True
            qTable[S][actions.index(A)] += alpha * (q_target - q_predict)
            S = S_
            step_counter += 1
            update_env(S, episode, step_counter)
    print('game over')
    return qTable


def main():
    """Train the agent, then animate the final episode's path on a 100x100 plot."""
    # Removed leftover debug code from the original (a throwaway
    # np.array plus print(arr[-1])).
    qTable = rl()
    # Greedy-rollout alternative, left disabled as in the original:
    # chooseBestAction((0, 0), qTable)
    # print(path)

    # Offset each point by 0.5 so markers sit inside cells rather than on
    # grid-line intersections.
    points = [(x + 0.5, y + 0.5) for x, y in path]

    fig, ax = plt.subplots()
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 100)
    # Major ticks every 10 cells.
    ax.set_xticks(range(0, 101, 10))
    ax.set_yticks(range(0, 101, 10))
    ax.grid(True)

    # A single Line2D drawn as red markers, grown one point per frame.
    scat, = ax.plot([], [], 'ro')
    scat.set_markersize(1)  # marker size 1 (the original comment said 10)

    def init():
        # Start the animation from an empty trace.
        scat.set_data([], [])
        return scat,

    def animate(i):
        # Show the first i+1 path points. Rebuilding from `points` avoids
        # the original's get_xdata()[:i] + [x] concatenation, which breaks
        # (elementwise addition) if matplotlib returns an ndarray.
        xs = [p[0] for p in points[:i + 1]]
        ys = [p[1] for p in points[:i + 1]]
        scat.set_data(xs, ys)
        return scat,

    # Keep a reference to the animation so it is not garbage-collected.
    ani = animation.FuncAnimation(fig, animate, frames=len(points),
                                  init_func=init, blit=True, interval=20)

    plt.show()


# Script entry point: train and visualize only when executed directly.
if __name__ == "__main__":
    main()
