import sys
import gym
import numpy as np
import torch  # torch.cuda.is_available()
from torch import nn
from torch.distributions import Categorical, Normal

from common import Agent, Logger, tensorize


class PGAgent(Agent):
    """Vanilla policy-gradient (REINFORCE) agent.

    Supports both discrete (Categorical) and continuous "box" (Gaussian with a
    learned, state-independent std) action spaces, chosen by ``self.space``
    which the ``Agent`` base class derives from the action space.
    """

    def __init__(self, state_space, action_space):
        super().__init__(state_space, action_space)
        self.build_actor()

    def build_actor(self):
        """Build the actor MLP (state_dim -> 256 -> 256 -> action_dim) and its optimizer."""
        actor_layer = (self.state_dim,) + (256, 256) + (self.action_dim,)
        self.actor = self.build_network(actor_layer, nn.Tanh, nn.Identity)
        if self.space == "box":
            # Learnable per-dimension log-std, initialized so std = exp(-0.5).
            log_std = -0.5 * np.ones(self.action_dim, np.float32)
            log_std = torch.as_tensor(log_std, dtype=torch.float32)
            # Registered as a Parameter BEFORE the optimizer is created below,
            # so it is included in actor.parameters() and gets trained.
            self.actor.log_std = torch.nn.Parameter(log_std)
        self.policy_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)

    def get_action(self, state):
        """Return (sampled action, action distribution) for `state`.

        Fix: the original built a Categorical unconditionally — interpreting
        box-actor outputs (Gaussian means) as logits — and then threw it away
        for box spaces; now only the relevant distribution is constructed.
        """
        state = torch.as_tensor(state, dtype=torch.float32)  # gym emits numpy arrays
        if self.space == "box":
            # Continuous action space: Gaussian with learned state-independent std.
            dist = Normal(self.actor(state), torch.exp(self.actor.log_std))
        else:
            # Discrete action space: categorical over actor logits.
            dist = Categorical(logits=self.actor(state))
        return dist.sample(), dist

    def log_action(self, action, actions):
        """Log-probability of `action` under distribution `actions`."""
        log_action = actions.log_prob(action)
        if self.space == "box":
            # Independent dims: joint log-prob is the sum over the action dims.
            log_action = log_action.sum(axis=-1)
        return log_action

    def update(self, buffer):
        """One PG update: maximize E[log pi(a|s) * return] over the buffered paths."""
        state_bf, action_bf, return_bf = buffer.extract()
        for _ in range(80):
            # Only the distribution is needed here, not a fresh sample.
            _, actions = self.get_action(state_bf)
            action_log_probs = self.log_action(action_bf, actions)
            loss = -(action_log_probs * return_bf).mean()
            self.optimize(loss, self.policy_optimizer)


class Buffer:
    """Trajectory buffer for vanilla PG: stores complete paths of variable length.

    Multiple episodes may be appended back-to-back; `start_step` marks where the
    current (not yet return-computed) path begins.
    """

    def __init__(self, gamma=0.99):
        # gamma: discount factor for returns-to-go (was hard-coded to 0.99;
        # now a parameter with the same default, so existing callers are unchanged).
        self.gamma = gamma
        self.states, self.actions, self.returns = [], [], []
        self.step, self.start_step = 0, 0

    def store(self, state, action, reward):
        """Append one transition; `returns` holds raw rewards until compute_returns()."""
        self.states.append(state)
        self.actions.append(action)
        self.returns.append(reward)
        self.step += 1

    def compute_returns(self):
        """Turn the rewards of the latest path into discounted returns-to-go, in place."""
        temp_return = 0  # terminal state value is fixed at 0
        for i in reversed(range(self.start_step, self.step)):
            temp_return = self.returns[i] + self.gamma * temp_return
            self.returns[i] = temp_return
        self.start_step = self.step  # the end of this path is the start of the next

    def extract(self):
        """Return all stored data as tensors and reset the buffer."""
        s, a, r = tensorize([self.states, self.actions, self.returns])
        self.states, self.actions, self.returns = [], [], []
        self.start_step, self.step = 0, 0
        return s, a, r


env = gym.make('CartPole-v1')  # also: Hopper-v4, MountainCarContinuous-v0
agent = PGAgent(env.observation_space, env.action_space)
buffer, sys.stdout = Buffer(), Logger()  # Logger wraps stdout; see common

for epoch in range(1000):
    state = env.reset()[0]
    while True:
        action = agent.get_action(state)[0].numpy()
        # Modern gym API: step returns (obs, reward, terminated, truncated, info).
        next_state, reward, terminated, truncated, _ = env.step(action)
        buffer.store(state, action, reward)
        state = next_state
        # Bug fix: the original bound only `terminated`, so a time-limit
        # truncation (CartPole-v1 at 500 steps, MountainCarContinuous-v0 at
        # 999) never ended the episode and the loop kept stepping a finished
        # env. Treating truncation as episode end bootstraps the truncated
        # tail with value 0 — a small bias vanilla PG tolerates.
        if terminated or truncated:
            buffer.compute_returns()
            if buffer.step >= 4000:  # update only after enough experience is collected
                print(f"Reward:{np.mean(buffer.returns):.3f}")  # monitor convergence
                agent.update(buffer)
                break
            state = env.reset()[0]
