import copy

class CliffWalkingEnv:
    """Cliff-walking grid world with the origin at the top-left corner.

    The transition model is tabular: P[state][action] is a list of
    (probability, next_state, reward, done) tuples.
    """

    def __init__(self, ncol=12, nrow=4):
        self.ncol = ncol
        self.nrow = nrow
        # Deterministic tabular transition model, built once up front.
        self.P = self.createP()

    def createP(self):
        """Build P[state][action] = [(p, next_state, reward, done)]."""
        # Per-action (dx, dy) offsets: 0=up, 1=down, 2=left, 3=right
        # (y grows downward because the origin is the top-left cell).
        moves = ((0, -1), (0, 1), (-1, 0), (1, 0))
        n_states = self.nrow * self.ncol
        P = [[[] for _ in moves] for _ in range(n_states)]
        for state in range(n_states):
            y, x = divmod(state, self.ncol)
            for action, (dx, dy) in enumerate(moves):
                if self.isDone(x, y):
                    # Cliff and goal cells are absorbing: stay put, zero reward.
                    P[state][action] = [(1, state, 0, True)]
                    continue
                # Clamp the move so the agent never leaves the grid.
                nx = min(self.ncol - 1, max(0, x + dx))
                ny = min(self.nrow - 1, max(0, y + dy))
                nxt = self.getPos(nx, ny)
                if self.isCliff(nx, ny):
                    # Stepping into the cliff ends the episode with a big penalty.
                    P[state][action] = [(1, nxt, -100, True)]
                elif self.isEnd(nx, ny):
                    P[state][action] = [(1, nxt, -1, True)]
                else:
                    P[state][action] = [(1, nxt, -1, False)]
        return P

    def getPos(self, x, y):
        """Flatten (x, y) grid coordinates into a row-major state index."""
        return x + y * self.ncol

    def isDone(self, x, y):
        """Terminal check: the cell is either part of the cliff or the goal."""
        return self.isCliff(x, y) or self.isEnd(x, y)

    def isEnd(self, x, y):
        """The goal is the bottom-right cell."""
        return (x, y) == (self.ncol - 1, self.nrow - 1)

    def isCliff(self, x, y):
        """The cliff spans the bottom row, excluding the start and goal cells."""
        return y == self.nrow - 1 and 0 < x < self.ncol - 1

class PolicyIteration:
    """Policy iteration: iterative policy evaluation + greedy improvement.

    Works on any tabular env exposing ncol, nrow and
    P[state][action] = [(p, next_state, reward, done)].
    """

    def __init__(self, env: "CliffWalkingEnv", theta, gamma):
        self.env = env
        self.v = [0] * self.env.ncol * self.env.nrow  # state values, initialized to 0
        # Start from the uniform random policy over the 4 actions.
        self.pi = [[0.25, 0.25, 0.25, 0.25] for _ in range(self.env.ncol * self.env.nrow)]
        self.theta = theta  # convergence threshold for policy evaluation
        self.gamma = gamma  # discount factor

    def _qsa(self, s, a):
        """One-step lookahead Q(s, a) under the current value estimate self.v."""
        qsa = 0
        for p, next_state, r, done in self.env.P[s][a]:
            # Terminal transitions contribute no bootstrapped value.
            qsa += p * (r + self.gamma * self.v[next_state] * (1 - done))
        return qsa

    def policy_evaluation(self):
        """Sweep V until it converges (max change < theta) under self.pi."""
        cnt = 1
        while 1:
            max_diff = 0
            new_v = [0] * self.env.ncol * self.env.nrow
            for s in range(self.env.ncol * self.env.nrow):
                # V(s) = sum_a pi(a|s) * Q(s, a)
                new_v[s] = sum(self.pi[s][a] * self._qsa(s, a) for a in range(4))
                max_diff = max(max_diff, abs(new_v[s] - self.v[s]))
            self.v = new_v
            if max_diff < self.theta:
                break  # converged
            cnt += 1
        print("Policy Evaluation:", cnt)

    def policy_improvement(self):
        """Make self.pi greedy w.r.t. self.v, splitting ties evenly."""
        for s in range(self.env.nrow * self.env.ncol):
            qsa_list = [self._qsa(s, a) for a in range(4)]
            maxq = max(qsa_list)
            cntq = qsa_list.count(maxq)  # number of actions attaining the max Q
            self.pi[s] = [1 / cntq if q == maxq else 0 for q in qsa_list]
        print("Policy Improvement")
        return self.pi

    def policy_iteration(self):
        """Alternate evaluation and improvement until the policy is stable."""
        while 1:
            self.policy_evaluation()
            old_pi = copy.deepcopy(self.pi)  # snapshot to detect stabilization
            new_pi = self.policy_improvement()
            if old_pi == new_pi:
                break

def print_agent(agent, action_meaning, disaster=(), end=()):
    """Pretty-print an agent's state values and policy on the grid.

    Args:
        agent: object exposing .env (with nrow, ncol, getPos), .v and .pi.
        action_meaning: one display symbol per action, e.g. ['^', 'v', '<', '>'].
        disaster: state indices rendered as '****' (e.g. cliff cells).
        end: state indices rendered as 'EEEE' (goal cells).

    Note: defaults are immutable tuples (membership-compatible with the old
    list defaults) to avoid the mutable-default-argument pitfall.
    """
    print("状态价值：")
    for y in range(agent.env.nrow):
        for x in range(agent.env.ncol):
            pos = agent.env.getPos(x, y)
            # Clip each value to 6 characters so the columns line up.
            print('%6.6s' % ('%.3f' % agent.v[pos]), end=' ')
        print()
    print("策略：")
    for y in range(agent.env.nrow):
        for x in range(agent.env.ncol):
            pos = agent.env.getPos(x, y)
            if pos in disaster:
                print('****', end=' ')
            elif pos in end:
                print('EEEE', end=' ')
            else:
                probs = agent.pi[pos]
                # Show every action with positive probability; 'o' otherwise.
                pi_str = ''
                for i in range(len(action_meaning)):
                    pi_str += action_meaning[i] if probs[i] > 0 else 'o'
                print(pi_str, end=' ')
        print()

def get_env_pos(env, x, y):
    """Map grid coordinates (x, y) to a flat row-major state index."""
    return x + env.ncol * y

class ValueIteration:
    """Value iteration: Bellman-optimality sweeps, then greedy policy extraction.

    Works on any tabular env exposing ncol, nrow and
    P[state][action] = [(p, next_state, reward, done)].
    """

    def __init__(self, env: "CliffWalkingEnv", theta, gamma):
        self.env = env
        self.v = [0] * self.env.ncol * self.env.nrow  # state values, initialized to 0
        # Placeholder uniform policy; replaced by the greedy extraction step.
        self.pi = [[0.25, 0.25, 0.25, 0.25] for _ in range(self.env.ncol * self.env.nrow)]
        self.theta = theta  # convergence threshold for the value sweeps
        self.gamma = gamma  # discount factor

    def _qsa(self, s, a):
        """One-step lookahead Q(s, a) under the current value estimate self.v."""
        qsa = 0
        for p, next_state, r, done in self.env.P[s][a]:
            # Terminal transitions contribute no bootstrapped value.
            qsa += p * (r + self.gamma * self.v[next_state] * (1 - done))
        return qsa

    def value_evaluation(self):
        """Sweep V with the Bellman-optimality backup until convergence."""
        cnt = 1
        while 1:
            max_diff = 0
            new_v = [0] * self.env.ncol * self.env.nrow
            for s in range(self.env.ncol * self.env.nrow):
                # V(s) = max_a Q(s, a)
                new_v[s] = max(self._qsa(s, a) for a in range(4))
                max_diff = max(max_diff, abs(new_v[s] - self.v[s]))
            self.v = new_v
            if max_diff < self.theta:
                break  # converged
            cnt += 1
        print("Value Evaluation:", cnt)

    def policy_improvement(self):
        """Extract the greedy policy w.r.t. self.v, splitting ties evenly."""
        for s in range(self.env.nrow * self.env.ncol):
            qsa_list = [self._qsa(s, a) for a in range(4)]
            maxq = max(qsa_list)
            cntq = qsa_list.count(maxq)  # number of actions attaining the max Q
            self.pi[s] = [1 / cntq if q == maxq else 0 for q in qsa_list]
        print("Policy Improvement")
        return self.pi

    def policy_iteration(self):
        """Run value iteration once: converge V, then extract the greedy policy."""
        self.value_evaluation()
        self.policy_improvement()


# --- Demo: build the 4x12 cliff-walking MDP and solve it ---
env = CliffWalkingEnv()
print(env.P)
action_meaning = ['^', 'v', '<', '>']  # display symbols for up/down/left/right
theta = 1e-3  # convergence threshold
gamma = 0.9  # discount factor
# Alternative solver (same interface):
# agent = PolicyIteration(env, theta, gamma)
agent = ValueIteration(env, theta, gamma)
agent.policy_iteration()
# States 37-46 are the cliff cells and 47 is the goal (bottom row of the grid).
print_agent(agent, action_meaning, list(range(37, 47)), [47])