import torch
import os
import sys
import numpy as np
from tqdm import tqdm
from typing import List
import gymnasium as gym
import matplotlib.pyplot as plt

from PolicyNet import PolicyNet

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import rl_utils


class REINFORCE:
    """Monte-Carlo policy-gradient (REINFORCE) agent for discrete action spaces."""

    def __init__(
        self,
        state_dim,
        hidden_dim,
        action_dim,
        learning_rate,
        gamma,
        device,
    ):
        """
        Args:
            state_dim: dimensionality of the flat observation vector.
            hidden_dim: hidden-layer width of the policy network.
            action_dim: number of discrete actions.
            learning_rate: Adam learning rate.
            gamma: discount factor in [0, 1].
            device: torch device the network and tensors live on.
        """
        self.state_dim = state_dim
        self.policy_net = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=learning_rate)
        self.gamma = gamma
        self.device = device

    def take_action(self, state):
        """Sample an action from the current policy for a single state.

        Returns:
            The sampled action index as a plain Python int.
        """
        state = torch.tensor(np.array([state]), dtype=torch.float).to(self.device)
        # Action selection needs no gradients; skip building the autograd graph.
        with torch.no_grad():
            probs = self.policy_net(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample().item()

        return action

    def update(
        self,
        rewards: List,
        states: List,
        actions: List,
    ):
        """Perform one REINFORCE update from a complete episode trajectory.

        Iterates the episode backwards, accumulating the discounted return Q,
        and ascends the policy gradient  E[ Q * grad log pi(a|s) ].

        Args:
            rewards: per-step rewards of one episode.
            states: per-step observations of the same episode.
            actions: per-step action indices of the same episode.
        """
        Q = 0
        self.optimizer.zero_grad()
        for i in reversed(range(len(rewards))):
            reward = rewards[i]
            state = (
                torch.tensor(np.array([states[i]]), dtype=torch.float)
                .view(-1, self.state_dim)
                .to(self.device)
            )
            # gather() requires int64 indices; make the dtype explicit so this
            # also works where numpy's default integer dtype is int32.
            action = (
                torch.tensor(np.array([actions[i]]), dtype=torch.int64)
                .view(-1, 1)
                .to(self.device)
            )
            log_probs = torch.log(self.policy_net(state).gather(1, action))
            Q = self.gamma * Q + reward
            loss = -Q * log_probs
            # Calling backward per step is correct: although the true gradient is
            # dJ = Q * d(log pi), Q does not depend on the parameters, so
            # Q * d(log pi) = d(Q * log pi); the per-step gradients simply
            # accumulate in .grad before the single optimizer step below.
            loss.backward()
        self.optimizer.step()


if __name__ == "__main__":
    # Hyperparameters.
    learning_rate = 1e-3
    num_episodes = 1000
    hidden_dim = 128
    gamma = 0.98
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    env_name = "CartPole-v0"
    env = gym.make(env_name)
    env.reset(seed=0)  # seed once; subsequent resets continue the seeded RNG stream
    torch.manual_seed(0)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n

    agent = REINFORCE(state_dim, hidden_dim, action_dim, learning_rate, gamma, device)
    return_list = []  # per-episode undiscounted returns, for plotting
    for i in range(10):
        with tqdm(total=int(num_episodes / 10), desc="Iteration %d" % i) as pbar:
            for i_episode in range(int(num_episodes / 10)):
                episode_return = 0

                states = []
                actions = []
                rewards = []

                state = env.reset()[0]
                done = False
                while not done:
                    action = agent.take_action(state)
                    next_state, reward, terminated, truncated, _ = env.step(action)
                    # Gymnasium splits episode end into terminated and truncated;
                    # either one ends the episode here.
                    done = terminated or truncated

                    states.append(state)
                    actions.append(action)
                    rewards.append(reward)

                    state = next_state
                    episode_return += reward

                # REINFORCE updates once per complete episode.
                agent.update(rewards, states, actions)
                return_list.append(episode_return)
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix(
                        {
                            "episode": "%d" % (num_episodes / 10 * i + i_episode + 1),
                            "return": "%.3f" % np.mean(return_list[-10:]),
                        }
                    )
                pbar.update(1)
    env.close()  # release the environment's resources once training is done

    episodes_list = list(range(len(return_list)))
    plt.plot(episodes_list, return_list)
    plt.xlabel("Episodes")
    plt.ylabel("Returns")
    plt.title("REINFORCE on {}".format(env_name))
    plt.show()

    mv_return = rl_utils.moving_average(return_list, 9)
    plt.plot(episodes_list, mv_return)
    plt.xlabel("Episodes")
    plt.ylabel("Returns")
    plt.title("REINFORCE on {}".format(env_name))
    plt.show()
