# 这里我将实验3的代码全部放在一起了
import numpy as np
import gym
import matplotlib.pyplot as plt
from collections import defaultdict


# 策略迭代类
class PolicyIteration:
    def __init__(self, env, gamma=0.9, theta=1e-5):
        self.env = env
        self.gamma = gamma
        self.theta = theta
        self.v = np.zeros(env.observation_space.n)
        self.pi = np.ones((env.observation_space.n, env.action_space.n)) / env.action_space.n

    def policy_evaluation(self):
        while True:
            delta = 0
            for s in range(self.env.observation_space.n):
                v = 0
                for a in range(self.env.action_space.n):
                    for prob, next_state, reward, done in self.env.P[s][a]:
                        v += self.pi[s][a] * prob * (reward + self.gamma * self.v[next_state])
                delta = max(delta, np.abs(v - self.v[s]))
                self.v[s] = v
            if delta < self.theta:
                break

    def policy_improvement(self):
        policy_stable = True
        for s in range(self.env.observation_space.n):
            old_action = np.argmax(self.pi[s])
            action_values = np.zeros(self.env.action_space.n)
            for a in range(self.env.action_space.n):
                for prob, next_state, reward, done in self.env.P[s][a]:
                    action_values[a] += prob * (reward + self.gamma * self.v[next_state])
            best_action = np.argmax(action_values)
            if old_action != best_action:
                policy_stable = False
            self.pi[s] = np.eye(self.env.action_space.n)[best_action]
        return policy_stable

    def policy_iteration(self):
        while True:
            self.policy_evaluation()
            if self.policy_improvement():
                break
        return self.pi


# 价值迭代类
# Value iteration: Bellman-optimality sweeps, then greedy policy extraction.
class ValueIteration:
    """Tabular value iteration for a discrete gym env exposing env.P.

    env.P[s][a] is assumed to be a list of (prob, next_state, reward, done)
    transition tuples, as in gym's FrozenLake.
    """

    def __init__(self, env, gamma=0.9, theta=1e-5):
        self.env = env
        self.gamma = gamma   # discount factor
        self.theta = theta   # convergence threshold
        self.v = np.zeros(env.observation_space.n)

    def _one_step_lookahead(self, s):
        """Q(s, a) for every action under the current self.v."""
        q = np.zeros(self.env.action_space.n)
        for a in range(self.env.action_space.n):
            for prob, ns, reward, done in self.env.P[s][a]:
                q[a] += prob * (reward + self.gamma * self.v[ns])
        return q

    def value_iteration(self):
        """Sweep Bellman-optimality updates to convergence; return greedy policy."""
        n_states = self.env.observation_space.n
        while True:
            max_diff = 0.0
            for s in range(n_states):
                old = self.v[s]
                self.v[s] = self._one_step_lookahead(s).max()
                max_diff = max(max_diff, abs(old - self.v[s]))
            if max_diff < self.theta:
                break
        # Extract a deterministic (one-hot) greedy policy from the final values.
        pi = np.zeros((n_states, self.env.action_space.n))
        for s in range(n_states):
            pi[s][np.argmax(self._one_step_lookahead(s))] = 1
        return pi


# SARSA 类
# SARSA: on-policy TD control with an epsilon-greedy behavior policy.
class Sarsa:
    """Tabular SARSA agent with epsilon-greedy exploration.

    Args:
        n_actions: size of the discrete action space.
        lr: learning rate (step size) of the TD update.
        gamma: discount factor.
        epsilon: probability of taking a uniformly random action.
    """

    def __init__(self, n_actions, lr=0.1, gamma=0.9, epsilon=0.3):
        self.n_actions = n_actions
        self.lr = lr
        self.gamma = gamma
        self.epsilon = epsilon
        # Q-table: unseen states lazily map to an all-zero action-value vector.
        self.Q = defaultdict(lambda: np.zeros(n_actions))

    def sample(self, state):
        """Epsilon-greedy action selection.

        Fix: break ties among maximal Q-values at random. The original
        np.argmax always returned the first maximal index, so with the
        all-zero initial Q-table the "greedy" branch always picked
        action 0, biasing early exploration toward a single action.
        """
        if np.random.uniform(0, 1) > self.epsilon:
            q = self.Q[state]
            best = np.flatnonzero(q == q.max())
            action = np.random.choice(best)
        else:
            action = np.random.choice(self.n_actions)
        return action

    def update(self, state, action, reward, next_state, next_action, done):
        """On-policy TD(0) update toward r + gamma * Q(s', a')."""
        if done:
            # Terminal transition: do not bootstrap from the next state.
            target = reward
        else:
            target = reward + self.gamma * self.Q[next_state][next_action]
        self.Q[state][action] += self.lr * (target - self.Q[state][action])


# Q - learning 类
# Q-learning: off-policy TD control with an epsilon-greedy behavior policy.
class QLearning:
    """Tabular Q-learning agent with epsilon-greedy exploration.

    Args:
        n_actions: size of the discrete action space.
        lr: learning rate (step size) of the TD update.
        gamma: discount factor.
        epsilon: probability of taking a uniformly random action.
    """

    def __init__(self, n_actions, lr=0.1, gamma=0.9, epsilon=0.3):
        self.n_actions = n_actions
        self.lr = lr
        self.gamma = gamma
        self.epsilon = epsilon
        # Q-table: unseen states lazily map to an all-zero action-value vector.
        self.Q = defaultdict(lambda: np.zeros(n_actions))

    def sample(self, state):
        """Epsilon-greedy action selection.

        Fix: break ties among maximal Q-values at random. The original
        np.argmax always returned the first maximal index, so with the
        all-zero initial Q-table the "greedy" branch always picked
        action 0, biasing early exploration toward a single action.
        """
        if np.random.uniform(0, 1) > self.epsilon:
            q = self.Q[state]
            best = np.flatnonzero(q == q.max())
            action = np.random.choice(best)
        else:
            action = np.random.choice(self.n_actions)
        return action

    def update(self, state, action, reward, next_state, done):
        """Off-policy TD(0) update toward r + gamma * max_a Q(s', a)."""
        if done:
            # Terminal transition: do not bootstrap from the next state.
            target = reward
        else:
            target = reward + self.gamma * np.max(self.Q[next_state])
        self.Q[state][action] += self.lr * (target - self.Q[state][action])


# 训练 SARSA 函数
# SARSA training loop (old gym API: reset() -> obs, step() -> 4-tuple).
def train_sarsa(env, agent, train_eps=400):
    """Train a SARSA agent for train_eps episodes; return per-episode returns."""
    episode_returns = []
    for _ in range(train_eps):
        state = env.reset()
        action = agent.sample(state)
        total = 0
        done = False
        while not done:
            next_state, reward, done, _ = env.step(action)
            # SARSA needs the *actual* next action before updating (on-policy).
            next_action = agent.sample(next_state)
            agent.update(state, action, reward, next_state, next_action, done)
            state, action = next_state, next_action
            total += reward
        episode_returns.append(total)
    return episode_returns


# 训练 Q - learning 函数
# Q-learning training loop (old gym API: reset() -> obs, step() -> 4-tuple).
def train_qlearning(env, agent, train_eps=400):
    """Train a Q-learning agent for train_eps episodes; return per-episode returns."""
    episode_returns = []
    for _ in range(train_eps):
        state = env.reset()
        total = 0
        done = False
        while not done:
            action = agent.sample(state)
            next_state, reward, done, _ = env.step(action)
            agent.update(state, action, reward, next_state, done)
            state = next_state
            total += reward
        episode_returns.append(total)
    return episode_returns


# 主函数
if __name__ == "__main__":
    # Policy iteration and value iteration use the FrozenLake-v1 environment
    # (deterministic transitions, since is_slippery=False).
    # NOTE(review): PolicyIteration/ValueIteration read env.P directly; on
    # newer gym versions this may require env.unwrapped.P — confirm the
    # installed gym version.
    env_frozen = gym.make('FrozenLake-v1', desc=None, map_name="4x4", is_slippery=False)

    # Policy iteration
    policy_iter = PolicyIteration(env_frozen)
    optimal_policy_pi = policy_iter.policy_iteration()
    print("策略迭代得到的最优策略：", optimal_policy_pi)

    # Value iteration
    value_iter = ValueIteration(env_frozen)
    optimal_policy_vi = value_iter.value_iteration()
    print("价值迭代得到的最优策略：", optimal_policy_vi)

    # SARSA and Q-learning use the CliffWalking-v0 environment.
    # NOTE(review): the training loops assume the old gym API
    # (reset() -> obs, step() -> 4-tuple); on gym>=0.26 / gymnasium these
    # return (obs, info) and a 5-tuple — verify before running.
    env_cliff = gym.make('CliffWalking-v0')

    # SARSA training
    sarsa_agent = Sarsa(env_cliff.action_space.n)
    sarsa_rewards = train_sarsa(env_cliff, sarsa_agent)

    # Q-learning training
    qlearning_agent = QLearning(env_cliff.action_space.n)
    qlearning_rewards = train_qlearning(env_cliff, qlearning_agent)

    # Plot the SARSA training reward curve (left panel).
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(sarsa_rewards)
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.title('SARSA Training Rewards')

    # Plot the Q-learning training reward curve (right panel).
    plt.subplot(1, 2, 2)
    plt.plot(qlearning_rewards)
    plt.xlabel('Episodes')
    plt.ylabel('Rewards')
    plt.title('Q - learning Training Rewards')

    plt.show()
    