import sys
import gym
import numpy as np
import torch  # torch.cuda.is_available()
from torch import nn
from torch.distributions import Categorical, Normal

from common import Agent, Logger, np_init


class PPOAgent(Agent):
    """PPO agent: separate actor/critic MLPs trained with the clipped
    surrogate objective and an approximate-KL early stop."""

    def __init__(self, state_space, action_space):
        super().__init__(state_space, action_space)
        self.build_actor_critic()

    def build_actor_critic(self):
        """Build actor and critic networks (two 256-unit tanh hidden layers)
        and their Adam optimizers."""
        actor_layer = (self.state_dim,) + (256, 256) + (self.action_dim,)
        self.actor = self.build_network(actor_layer, nn.Tanh, nn.Identity)
        if self.space == "box":
            # Continuous actions: state-independent learnable log-std,
            # initialised to -0.5 per action dimension.
            log_std = -0.5 * np.ones(self.action_dim, np.float32)
            self.actor.log_std = torch.nn.Parameter(torch.as_tensor(log_std, dtype=torch.float32))
        self.policy_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)

        critic_layer = (self.state_dim,) + (256, 256) + (1,)
        self.critic = self.build_network(critic_layer, nn.Tanh, nn.Identity)
        self.value_optimizer = torch.optim.Adam(self.critic.parameters(), lr=1e-3)

    def get_action(self, state):
        """Return (sampled action, action distribution) for `state`.

        Fix: the original built a Categorical unconditionally and then, for
        box spaces, discarded it and ran the actor a second time to build a
        Normal — one wasted forward pass per call. Branch once instead.
        """
        state = torch.as_tensor(state, dtype=torch.float32)  # gym emits numpy arrays
        if self.space == "box":
            # Continuous action space: Gaussian with fixed (learnable) std.
            actions = Normal(self.actor(state), torch.exp(self.actor.log_std))
        else:
            # Discrete action space: categorical over the actor's logits.
            actions = Categorical(logits=self.actor(state))
        return actions.sample(), actions

    def get_value(self, state):
        """Critic's state value V(s), with the trailing singleton dim squeezed."""
        state = torch.as_tensor(state, dtype=torch.float32)
        return torch.squeeze(self.critic(state), dim=-1)

    def log_action(self, action, actions):
        """Log-probability of `action` under distribution `actions`."""
        log_action = actions.log_prob(action)  # per-dimension for Normal
        if self.space == "box":
            # Joint log-prob of the continuous action: sum over action dims.
            log_action = log_action.sum(axis=-1)
        return log_action

    def choose_action(self, state):
        """Sample an action; return (action, log-prob, state value) as
        detached numpy values, ready to be stored in the rollout buffer."""
        action, actions = self.get_action(state)
        action_log_probs = self.log_action(action, actions).detach()
        state_value = self.get_value(state).detach()
        return action.numpy(), action_log_probs.numpy(), state_value.numpy()

    def update(self, buffer):
        """One PPO update over the whole buffer: up to 80 policy epochs with a
        KL-based early stop, then 80 value-function epochs (MSE to returns)."""
        state_bf, action_bf, return_bf, action_log_prob_bf, advantage_bf = buffer.extract()
        for _ in range(80):
            _, actions = self.get_action(state_bf)
            action_log_probs = self.log_action(action_bf, actions)
            ratio = torch.exp(action_log_probs - action_log_prob_bf)
            surr_loss = ratio * advantage_bf
            clip_loss = torch.clip(ratio, 0.8, 1.2) * advantage_bf  # clip epsilon = 0.2
            policy_loss = - torch.min(surr_loss, clip_loss).mean()

            self.optimize(policy_loss, self.policy_optimizer)
            # NOTE(review): approx KL uses log-probs computed before this step,
            # so the break fires one optimizer step late — confirm intended.
            approx_kl = (action_log_probs - action_log_prob_bf).mean().item()
            if approx_kl > 1.5 * 0.01:  # target KL = 0.01
                break

        for _ in range(80):
            state_values = self.get_value(state_bf)
            value_loss = ((state_values - return_bf) ** 2).mean()
            self.optimize(value_loss, self.value_optimizer)


class Buffer:  # fixed-size rollout buffer
    def __init__(self, buffer_size, state_shape, action_shape=None):
        """Allocate flat numpy storage for one rollout of `buffer_size` steps.

        `action_shape` is None/empty for discrete action spaces (scalar actions).
        """
        self.states = np_init(buffer_size, state_shape[0])
        self.actions = np_init(buffer_size, action_shape[0]) if action_shape else np_init(buffer_size)
        self.returns, self.state_values, self.actions_log_probs, self.advantages = np_init(buffer_size, num=4)
        # step: next write index; start_step: first index of the open segment;
        # full: True right after the last slot was written.
        self.size, self.step, self.start_step, self.full = buffer_size, 0, 0, False

    def store(self, state, action, reward, actions_log_prob, state_value):
        """Write one transition at the current index and advance (with wrap)."""
        self.states[self.step] = state
        self.actions[self.step] = action
        self.returns[self.step] = reward  # raw reward; replaced by the return later
        self.actions_log_probs[self.step] = actions_log_prob
        self.state_values[self.step] = state_value

        self.full = self.step == self.size - 1
        self.step = (self.step + 1) % self.size

    def compute_returns(self, last_state_value):
        """Turn the rewards of the open segment [start_step, end) into
        discounted returns (gamma = 0.99) and fill in advantages.

        `last_state_value` bootstraps truncated paths (0 for terminal ones).

        Fix: when the final store wraps `step` to 0, the open segment ends at
        `size`, not 0 — the original `range(start_step, 0)` was empty, so the
        last partial trajectory of every rollout was silently skipped.
        """
        end = self.size if self.step == 0 and self.full else self.step
        temp_return = last_state_value
        for i in reversed(range(self.start_step, end)):
            temp_return = self.returns[i] + 0.99 * temp_return
            self.returns[i] = temp_return
        episode = slice(self.start_step, end)
        # Simple advantage estimate: return minus the stored value baseline.
        self.advantages[episode] = self.returns[episode] - self.state_values[episode]
        self.start_step = self.step

    def extract(self):
        """Return (states, actions, returns, log-probs, normalized advantages)
        as float32 tensors. Advantages are standardized; the epsilon guards
        against a zero std (all-equal advantages) producing NaNs."""
        advs = (self.advantages - np.mean(self.advantages)) / (np.std(self.advantages) + 1e-8)
        array_list = (self.states, self.actions, self.returns, self.actions_log_probs, advs)
        return [torch.as_tensor(a, dtype=torch.float32) for a in array_list]


env = gym.make('CartPole-v1')  # swap to e.g. Hopper-v4 for a continuous (Box) task
agent = PPOAgent(env.observation_space, env.action_space)
buffer = Buffer(4000, env.observation_space.shape, env.action_space.shape)
sys.stdout = Logger()  # NOTE(review): Logger presumably tees stdout to a log file — confirm in common

for epoch in range(1000):  # total training epochs: 1000
    state, done, episode_step = env.reset(seed=0)[0], False, 0
    for step in range(4000):  # max 4000 steps per epoch, max 1000 steps per episode
        action, action_log_prob, state_value = agent.choose_action(state)
        next_state, reward, done, _, _ = env.step(action)  # NOTE(review): the truncated flag (4th value) is ignored
        buffer.store(state, action, reward, action_log_prob, state_value)
        episode_step, state = episode_step + 1, next_state

        # At episode end or epoch end, compute the current path's returns;
        # truncated paths bootstrap from the value of the final state
        # (terminal states get 0 via the (1 - done) factor).
        if done or episode_step == 1000 or step == 4000 - 1:
            last_state_value = (1 - done) * agent.choose_action(next_state)[2]
            buffer.compute_returns(last_state_value)

            if buffer.full: agent.update(buffer)  # update when full; buffer size == steps per epoch, so once per epoch

            if done or episode_step == 1000:  # reset env at episode end; a reset at epoch end is harmless
                # NOTE(review): re-seeding with seed=0 on every reset makes all
                # episodes start identically — confirm this is intended.
                state, done, episode_step = env.reset(seed=0)[0], False, 0

    print(f"Reward:{np.mean(buffer.returns):.3f}")  # buffer is never cleared; mean stored return tracks convergence
