import copy
import gym

from env import CliffWalkingEnv, PrintPolicy


class PolicyIteration:
    """Policy iteration: alternate policy evaluation and greedy policy
    improvement until the policy no longer changes.

    The environment must expose `ncol`, `nrow` and a transition table
    `P[s][a] -> [(prob, next_state, reward, done), ...]`.
    """

    def __init__(self, env, theta, gamma):
        self.env = env
        n_states = env.ncol * env.nrow
        self.v = [0] * n_states  # state values, initialised to zero
        # Start from the uniform random policy over the 4 actions.
        self.pi = [[0.25] * 4 for _ in range(n_states)]
        self.theta = theta  # convergence threshold for evaluation sweeps
        self.gamma = gamma  # discount factor

    def policyEvaluation(self):
        """Iteratively evaluate the current policy until the largest value
        change in a sweep falls below `theta`."""
        n_states = self.env.ncol * self.env.nrow
        sweeps = 0
        while True:
            sweeps += 1
            new_values = [0] * n_states
            max_diff = 0
            # Synchronous backup: new_values is built entirely from the
            # previous self.v, so sweep order does not affect the result.
            for s in range(n_states):
                q_values = [
                    sum(
                        prob * (reward + self.gamma * self.v[nxt])
                        for prob, nxt, reward, _ in self.env.P[s][a]
                    )
                    for a in range(4)
                ]
                # V(s) is the policy-weighted average of the action values.
                new_values[s] = sum(w * q for w, q in zip(self.pi[s], q_values))
                max_diff = max(max_diff, abs(new_values[s] - self.v[s]))
            self.v = new_values
            if max_diff < self.theta:
                break
        print("策略评估经过 %d 轮后完成" % sweeps)

    def policyImprovement(self):
        """Make the policy greedy w.r.t. the current values, splitting
        probability evenly among tied best actions."""
        for s in range(self.env.ncol * self.env.nrow):
            q_values = [
                sum(
                    prob * (reward + self.gamma * self.v[nxt])
                    for prob, nxt, reward, _ in self.env.P[s][a]
                )
                for a in range(4)
            ]
            best_q = max(q_values)
            n_best = q_values.count(best_q)
            self.pi[s] = [1 / n_best if q == best_q else 0 for q in q_values]
        print("策略改进完成")

    def PolicyIteration(self):
        """Run evaluation/improvement rounds until the policy is stable."""
        while True:
            self.policyEvaluation()
            snapshot = copy.deepcopy(self.pi)  # pi before improvement
            self.policyImprovement()
            if snapshot == self.pi:
                break


if __name__ == "__main__":
    # Cliff-walking grid world (custom implementation from env.py).
    grid_width, grid_height = 12, 4
    cliff_env = CliffWalkingEnv(grid_width, grid_height)
    cliff_actions = ["^", "v", "<", ">"]

    # Frozen-lake environment; unwrap so the transition matrix P is reachable.
    frozen_env = gym.make("FrozenLake-v1").unwrapped
    frozen_actions = ["<", "v", ">", "^"]

    theta = 1e-5  # evaluation convergence threshold
    gamma = 0.9  # discount factor

    # Solve the cliff environment with policy iteration and print the policy.
    solver = PolicyIteration(cliff_env, theta, gamma)
    solver.PolicyIteration()
    # NOTE(review): states 37-46 / 47 presumably mark the cliff cells and the
    # goal cell of the 12x4 grid — confirm against PrintPolicy in env.py.
    PrintPolicy(solver, cliff_actions, list(range(37, 47)), [47])
