import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F

import flappybird_env
import rl_utils
from argparse import ArgumentParser
from model import PolicyNet, ValueNet


class FlappyBirdPPO:
    """PPO agent (clipped surrogate objective) for the Flappy Bird environment.

    Holds a policy network (actor) and a state-value network (critic), each
    with its own Adam optimizer, and updates both from on-policy rollouts
    using GAE advantages computed by ``rl_utils.compute_advantage``.
    """

    def __init__(
        self,
        state_dim,
        hidden_dim,
        action_dim,
        actor_lr,
        critic_lr,
        lmbda,
        epochs,
        eps,
        gamma,
        device,
    ):
        """Build actor/critic networks and their optimizers.

        Args:
            state_dim: Dimension of the flat observation vector.
            hidden_dim: Hidden-layer width for both networks.
            action_dim: Number of discrete actions.
            actor_lr: Learning rate for the policy network.
            critic_lr: Learning rate for the value network.
            lmbda: GAE lambda.
            epochs: Optimization epochs per batch of trajectories (must be >= 1).
            eps: PPO clipping range epsilon.
            gamma: Discount factor.
            device: torch.device to place networks and tensors on.
        """
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)
        self.actor_optimizer = torch.optim.Adam(
            self.actor.parameters(), lr=actor_lr
        )
        self.critic_optimizer = torch.optim.Adam(
            self.critic.parameters(), lr=critic_lr
        )
        self.gamma = gamma
        self.lmbda = lmbda
        self.epochs = epochs  # optimization epochs per batch of trajectories
        self.eps = eps  # PPO clipping range parameter
        self.device = device

    def take_action(self, state):
        """Sample an action index from the current policy for one state.

        The actor is assumed to output a probability distribution over
        actions (not logits) — TODO confirm against model.PolicyNet.
        """
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        probs = self.actor(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample()
        return action.item()

    def update(self, transition_dict):
        """Run one PPO update from a batch of transitions.

        Args:
            transition_dict: Dict with keys "states", "actions", "rewards",
                "next_states", "dones", each a sequence over the batch.

        Returns:
            Tuple ``(actor_loss, critic_loss)`` — the losses (as floats)
            from the last optimization epoch. Requires ``self.epochs >= 1``.
        """
        states = torch.tensor(transition_dict["states"], dtype=torch.float).to(
            self.device
        )
        actions = (
            torch.tensor(transition_dict["actions"]).view(-1, 1).to(self.device)
        )
        rewards = (
            torch.tensor(transition_dict["rewards"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )
        # Normalize rewards to stabilize training; epsilon guards a zero std.
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)
        next_states = torch.tensor(
            transition_dict["next_states"], dtype=torch.float
        ).to(self.device)
        dones = (
            torch.tensor(transition_dict["dones"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )
        td_target = rewards + self.gamma * self.critic(next_states) * (
            1 - dones
        )
        td_delta = td_target - self.critic(states)
        # Detach before computing advantages: the advantage is a fixed target
        # and must not carry the critic's autograd graph (compute_advantage
        # typically converts to numpy, which fails on grad-requiring tensors).
        advantage = rl_utils.compute_advantage(
            self.gamma, self.lmbda, td_delta.detach().cpu()
        ).to(self.device)
        old_probs = self.actor(states).gather(1, actions)
        old_log_probs = torch.log(old_probs).detach()

        for _ in range(self.epochs):
            probs = self.actor(states).gather(1, actions)
            log_probs = torch.log(probs)
            ratio = torch.exp(log_probs - old_log_probs)
            surr1 = ratio * advantage
            surr2 = (
                torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * advantage
            )  # clipped surrogate
            actor_loss = torch.mean(-torch.min(surr1, surr2))  # PPO loss
            # F.mse_loss already reduces to the mean; no extra torch.mean needed.
            critic_loss = F.mse_loss(self.critic(states), td_target.detach())
            self.actor_optimizer.zero_grad()
            self.critic_optimizer.zero_grad()
            actor_loss.backward()
            critic_loss.backward()
            self.actor_optimizer.step()
            self.critic_optimizer.step()
        return actor_loss.item(), critic_loss.item()

    def save_pth(self, path):
        """Save the actor's weights (policy only) to ``path``."""
        torch.save(self.actor.state_dict(), path)


# ---- Command-line configuration ----------------------------------------
# Fix: the original bound both the ArgumentParser and the parsed Namespace
# to the name `args` (shadowing), and passed a mistyped `action_lr` alias
# into the agent. Argument names, defaults, and all behavior are unchanged.
parser = ArgumentParser("FlappyBirdPPO")
parser.add_argument("--actor_lr", type=float, default=1e-5)
parser.add_argument("--critic_lr", type=float, default=1e-4)
parser.add_argument("--num_episodes", type=int, default=2000)
parser.add_argument("--hidden_dim", type=int, default=256)
parser.add_argument("--gamma", type=float, default=0.98)
parser.add_argument("--lmbda", type=float, default=0.95)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--eps", type=float, default=0.2)
parser.add_argument("--touch_ground_reward", type=float, default=-1.)
parser.add_argument("--touch_top_reward", type=float, default=-0.5)
parser.add_argument("--touch_pipe_reward", type=float, default=-1.0)
parser.add_argument("--pass_pipe_reward", type=float, default=1.)
parser.add_argument("--normal_reward", type=float, default=0.1)
parser.add_argument("--near_pipe_reward", type=float, default=0.2)
args = parser.parse_args()

device = (
    torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)

env_name = "FlappyBird-v0"
env = flappybird_env.flappybird(
    args.touch_ground_reward,
    args.touch_top_reward,
    args.touch_pipe_reward,
    args.pass_pipe_reward,
    args.normal_reward,
    args.near_pipe_reward,
)
# Fixed seeds for reproducibility.
env.seed(0)
torch.manual_seed(0)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = FlappyBirdPPO(
    state_dim,
    args.hidden_dim,
    action_dim,
    args.actor_lr,
    args.critic_lr,
    args.lmbda,
    args.epochs,
    args.eps,
    args.gamma,
    device,
)

# Train on-policy and plot the per-episode returns.
return_list = rl_utils.train_on_policy_agent(env, agent, args.num_episodes)

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel("Episodes")
plt.ylabel("Returns")
plt.title("FlappyBirdPPO on {}".format(env_name))
plt.savefig("FlappyBirdPPO.png")
