import numpy as np

def markov_reward_process():
    np.random.seed(0)
    # 定义状态转移概率矩阵P
    P = [
        [0.9,0.1,0.0,0.0,0.0,0.0],
        [0.5,0.0,0.5,0.0,0.0,0.0],
        [0.0,0.0,0.0,0.6,0.0,0.4],
        [0.0,0.0,0.0,0.0,0.3,0.7],
        [0.0,0.2,0.3,0.5,0.0,0.0],
        [0.0,0.0,0.0,0.0,0.0,1.0],
    ]
    P = np.array(P)
    # 定义奖励函数
    rewards = [-1,-2,-2,10,1,0]
    # 定义折扣因子
    gamma = 0.5
    # 给定一条序列，计算从某个索引（起始状态）开始到序列最后（终止状态）得到的回报
    def compute_return(start_index,chain,gamma):
        G = 0
        for i in reversed(range(start_index,len(chain))):
            G = rewards[chain[i]-1] + gamma * G
        return G

    # 一个序列状态，s1-s2-s3-s6
    chain = [1,2,3,6]
    start_index = 0
    G = compute_return(start_index,chain,gamma)
    print(f"根据本序列计算得到回报为： {G}")

    def compute(P,rewards,gamma,states_num):
        '''利用贝尔曼方程的矩阵形式计算解析解，states_num是MRP的状态数'''
        # 将rewards写成列向量形式
        rewards = np.array(rewards).reshape((-1,1))
        # 计算每个状态的价值
        value = np.linalg.inv(np.eye(states_num,states_num) - gamma * P) @ rewards
        return value

    V = compute(P,rewards,gamma,len(rewards))
    print(f"MRP的价值函数(MRP中每个状态价值分别为)为:\n{V}")

# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    markov_reward_process()
