import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

# Bucket the random points by the integer grid cell that contains them.
# Keys are (x, y) cell coordinates; 12x12 leaves headroom beyond the
# 10x10 playing field so int() truncation can never raise a KeyError.
grid = {(i, j): [] for i in range(12) for j in range(12)}

# Generate 100 random points inside each disc centred at [3, 5] and
# [6, 5] with radius `radius` (uniform in angle and radial distance —
# NOTE(review): this concentrates points near the centre, not uniformly
# over the disc area; presumably intentional for this demo).
centers = [[3, 5], [6, 5]]
radius = 3
points = []
for center in centers:
    for _ in range(100):
        theta = np.random.uniform(0, 2 * np.pi)
        r = np.random.uniform(0, radius)
        x = center[0] + r * np.cos(theta)
        y = center[1] + r * np.sin(theta)
        points.append([x, y])
        # Register the point in its containing cell, keyed (x, y).
        grid[(int(x), int(y))].append([x, y])
points = np.array(points)
# Scatter plot of the generated points.
plt.scatter(points[:, 0], points[:, 1], s=10)

# --- Q-learning hyper-parameters and state --------------------------------
grid_size = 10
actions = ['up', 'down', 'left', 'right']
num_actions = len(actions)
# Q[row, col, action]: estimated return of taking `action` from cell (row, col).
Q = np.zeros((grid_size, grid_size, num_actions))
alpha = 0.1       # learning rate
gamma = 0.9       # discount factor
epsilon = 0.1     # exploration rate
num_episodes = 50000
path = []         # best (shortest) episode path found during training
steps = 0         # step counter of the current episode


def choose_action(state):
    """Pick an action index for `state` with an epsilon-greedy policy.

    Explores (uniform random action) with probability `epsilon`, and also
    whenever the state's Q-row sums to zero (i.e. it looks unvisited);
    otherwise exploits by returning the argmax action.
    """
    should_explore = np.random.uniform() < epsilon
    looks_unvisited = np.sum(Q[state]) == 0
    if should_explore or looks_unvisited:
        return np.random.randint(num_actions)
    return np.argmax(Q[state])


def state_from_position(position):
    """Convert a [row, col] position into a hashable (row, col) state tuple."""
    row, col = position[0], position[1]
    return row, col


# Helper: convert a state back into its coordinate pair.
def position_from_state(state):
    """Convert a (row, col) state tuple back into a (row, col) pair."""
    row, col = state[0], state[1]
    return row, col


def executeAction(action, position):
    """Return the [row, col] reached by taking `action` from `position`.

    Moves are clamped to the grid_size x grid_size board, so walking into
    a wall leaves that coordinate unchanged; an action index outside the
    four known moves returns the position unmodified.
    """
    row, col = position[0], position[1]
    move = actions[action]
    if move == 'up':
        row = max(row - 1, 0)
    elif move == 'down':
        row = min(row + 1, grid_size - 1)
    elif move == 'left':
        col = max(col - 1, 0)
    elif move == 'right':
        col = min(col + 1, grid_size - 1)
    return [row, col]


# ---------------------------------------------------------------------------
# Training: run `num_episodes` episodes of tabular Q-learning.  Each episode
# the agent starts at `start` and walks until every random point has been
# collected; the shortest successful episode is kept for plotting.
# ---------------------------------------------------------------------------
minLen = float('inf')        # step count of the best (shortest) episode so far
tempPath = []                # positions visited during the current episode
elsePointNum = len(points)   # points still uncollected (reset each episode)
start = [0, 0]               # start cell as [row, col]

print('Training...')
for episode in range(num_episodes):
    # A shallow copy is sufficient here: collected cells below are *rebound*
    # to new empty lists, never mutated in place, so `grid` stays intact.
    gridCopy = grid.copy()
    elsePointNum = len(points)
    tempPath = []
    if episode % 100 == 0:
        print('Episode:', episode + 1, '/', num_episodes, ";  本次训练步数：", steps)
    position = start
    state = state_from_position(position)
    steps = 0
    while elsePointNum != 0:
        steps += 1
        tempPath.append(position)
        action = choose_action(state)

        next_position = executeAction(action, position)
        next_state = state_from_position(next_position)

        # Grid cells are keyed (x, y) = (col, row); positions are [row, col].
        cell_key = (next_position[1], next_position[0])
        gridCell = gridCopy[cell_key]
        if gridCell:
            # Collect every point in this cell and mark the cell as emptied.
            reward = 10
            elsePointNum -= len(gridCell)
            gridCopy[cell_key] = []
        else:
            reward = -1  # step penalty encourages short paths

        # Standard Q-learning update (Bellman backup toward the TD target).
        Q[state][action] += alpha * (reward + gamma * np.max(Q[next_state]) - Q[state][action])

        position = next_position
        state = next_state

    if steps < minLen:
        minLen = steps
        path = tempPath.copy()

# Draw the shortest path found over the scatter plot of points.
print(minLen)
path = np.array(path)
plt.plot(path[:, 1], path[:, 0], color='red', linewidth=2)
plt.xlim(0, grid_size - 1)
plt.ylim(0, grid_size - 1)
plt.xticks(np.arange(0, grid_size, 1))
plt.yticks(np.arange(0, grid_size, 1))
plt.grid(True)
plt.xlabel('Column')
plt.ylabel('Row')
plt.title('Shortest Path')
plt.show()