'''
https://zhuanlan.zhihu.com/p/29213893

'''

import numpy as np
import random

# Q-table: learned action-value estimates — one row per state, one column
# per action (6 states x 6 actions), initialized to zero.
# NOTE: plain ndarrays replace the deprecated np.matrix / np.asmatrix API;
# every access in this file (q[s, a], q[row].max(), print(q)) behaves
# identically on an ndarray.
q = np.zeros((6, 6))

# Reward table: r[state, action] is the immediate reward for moving from
# `state` into room `action`. -1 marks an impossible transition, 0 an
# allowed move, and 100 a move into the goal room (state 5).
r = np.array([[-1, -1, -1, -1, 0, -1],
              [-1, -1, -1, 0, -1, 100],
              [-1, -1, -1, 0, -1, -1],
              [-1, 0, 0, -1, 0, -1],
              [0, -1, -1, 0, -1, 100],
              [-1, 0, -1, -1, 0, 100]])

# Discount factor applied to future rewards in the Q-learning update.
gamma = 0.8

# Terminal (goal) state: reaching room 5 ends an episode.
end_state = 5

# 循环1000次
# Train for 1000 episodes of tabular Q-learning.
for i in range(1000):
    # Start each episode from a uniformly random state in 0..5.
    state = random.randint(0, 5)
    # Walk until the terminal state is reached.
    while state != end_state:
        # Valid moves from `state` are the actions with non-negative reward.
        r_pos_action = [action for action in range(6) if r[state, action] >= 0]

        # Pure exploration: pick the next state uniformly among valid moves.
        next_state = random.choice(r_pos_action)
        # Q-learning update with learning rate 1:
        #   Q(s, a) = R(s, a) + gamma * max_a' Q(s', a')
        q[state, next_state] = r[state, next_state] + gamma * q[next_state].max()
        state = next_state

# After training, q holds the converged state-action value estimates.
print(q)

# 开始试验训练出来的机器人是否会在规定时间内找到最佳路线
# Validate the trained policy: from a random start, greedily follow the
# highest-valued action and check that the goal is reached within 20 steps.
for i in range(10):
    print("第{}此验证".format(i + 1))

    state = random.randint(0, 5)
    print('机器人处于{}'.format(state))
    count = 0
    # Use the end_state constant (consistent with the training loop).
    while state != end_state:
        # Give up after 20 moves — the greedy policy failed this run.
        if count > 20:
            print('fail')
            break

        # Greedy step: gather every action tied for the best Q-value in
        # this state, then break ties uniformly at random.
        q_max = q[state].max()
        q_max_action = [action for action in range(6) if q[state, action] == q_max]

        next_state = random.choice(q_max_action)
        print('the robot goes to ' + str(next_state) + '.')
        state = next_state
        count += 1
