# Policy-iteration example on a simple 4x4 grid world.
import numpy as np

# Number of states (4x4 grid, row-major indexing).
n_states = 16
# Number of actions: up, down, left, right.
n_actions = 4
# Discount factor.
gamma = 0.9

# Transition model P(s'|s, a) and rewards R(s, a, s').
# Every move is deterministic and costs -1 until a terminal state is reached.
P = np.zeros((n_states, n_actions, n_states))
R = np.zeros((n_states, n_actions, n_states))

# Terminal (absorbing) corner states: no outgoing transitions are defined.
terminal_states = [0, 15]

# Build the deterministic transitions: actions that would leave the grid
# keep the agent in place.
for state in range(n_states):
    if state in terminal_states:
        continue
    row, col = divmod(state, 4)
    candidates = [
        state - 4 if row > 0 else state,  # up
        state + 4 if row < 3 else state,  # down
        state - 1 if col > 0 else state,  # left
        state + 1 if col < 3 else state,  # right
    ]
    for action, nxt in enumerate(candidates):
        P[state, action, nxt] = 1
        R[state, action, nxt] = -1


# Policy evaluation: compute the state-value function for a fixed policy.
def policy_evaluation(policy, theta=0.0001):
    """Evaluate ``policy`` by iterative in-place (Gauss-Seidel) sweeps.

    Repeatedly applies the Bellman expectation backup to every state until
    the largest single-state change falls below ``theta``, then returns the
    value vector V of length ``n_states``.
    """
    V = np.zeros(n_states)
    while True:
        delta = 0
        for s in range(n_states):
            old_value = V[s]
            # Expected one-step return under the policy; V[t] for t == s
            # still holds the previous value while the sum is accumulated.
            V[s] = sum(
                policy[s, a] * P[s, a, t] * (R[s, a, t] + gamma * V[t])
                for a in range(n_actions)
                for t in range(n_states)
            )
            delta = max(delta, abs(old_value - V[s]))
        if delta < theta:
            break
    return V


# Policy improvement: greedy one-step lookahead over the value function.
def policy_improvement(V):
    """Return the deterministic policy that is greedy with respect to ``V``.

    Each row of the returned (n_states, n_actions) array is one-hot on the
    action with the highest expected one-step return; ties go to the
    lowest-indexed action (np.argmax picks the first maximum).
    """
    policy = np.zeros((n_states, n_actions))
    for s in range(n_states):
        q = [
            sum(P[s, a, t] * (R[s, a, t] + gamma * V[t]) for t in range(n_states))
            for a in range(n_actions)
        ]
        policy[s, np.argmax(q)] = 1
    return policy


# Policy iteration: alternate evaluation and greedy improvement.
def policy_iteration():
    """Run policy iteration until the greedy policy stops changing.

    Starts from the uniform random policy and returns ``(policy, V)`` where
    ``V`` is the value function of the returned (stable) policy.
    """
    policy = np.full((n_states, n_actions), 1.0 / n_actions)
    while True:
        V = policy_evaluation(policy)
        improved = policy_improvement(V)
        if (improved == policy).all():
            # Greedy policy is unchanged, so it is optimal; V evaluates it.
            return policy, V
        policy = improved


# Run policy iteration and report the result. Guarded so that importing this
# module does not trigger the (potentially slow) computation or the prints.
if __name__ == "__main__":
    optimal_policy, optimal_value = policy_iteration()

    print("最优策略:")
    print(optimal_policy)
    print("最优值函数:")
    print(optimal_value)