import gym
import numpy as np
import torch  # torch.cuda.is_available()
from torch.nn import Sequential as S, Linear as L, Tanh as T, Identity as I
from torch.optim import Adam as A
from torch.distributions import Categorical as C


def t(array):
    """Convert *array* (list/ndarray/tensor) to a float32 torch tensor."""
    return torch.as_tensor(array, dtype=torch.float32)
def q(array):
    """Drop a trailing singleton dimension, e.g. (N, 1) -> (N,)."""
    return array.squeeze(dim=-1)
def op(loss, optimizer):
    """Apply one gradient step of *optimizer* on *loss*.

    Returns the three call results as a list (all None), matching the
    original expression form.
    """
    zeroed = optimizer.zero_grad()
    backed = loss.backward()
    stepped = optimizer.step()
    return [zeroed, backed, stepped]
def net(s, e):
    """Build a tanh MLP with two 256-unit hidden layers mapping s -> e."""
    layers = [L(s, 256), T(), L(256, 256), T(), L(256, e), I()]
    return S(*layers)
def z(bs, s=None):
    """Return a zero-filled float32 buffer of shape (bs,) or (bs, s).

    Uses `s is None` rather than truthiness so that a legitimate zero-width
    second dimension (s == 0) still yields a 2-D array instead of silently
    falling back to the 1-D shape.
    """
    shape = bs if s is None else (bs, s)
    return np.zeros(shape, np.float32)


class Agent:
    """PPO-clip actor-critic: `ac` is the policy network, `cr` the value network."""

    def __init__(self, sd, ad):
        # sd: state dimension, ad: number of discrete actions
        self.ac, self.cr = net(sd, ad), net(sd, 1)
        # Separate optimizers / learning rates for policy and value function.
        self.po, self.vo = A(self.ac.parameters(), lr=3e-4), A(self.cr.parameters(), lr=1e-3)

    def act(self, s):
        """Sample an action for state s.

        Returns (action, log-prob of the action, critic's state value),
        all as detached numpy values.
        """
        p = C(logits=self.ac(t(s)))
        a = p.sample()
        return a.numpy(), p.log_prob(a).detach().numpy(), q(self.cr(t(s))).detach().numpy()

    def up(self, b):
        """Run one PPO update from buffer b: clipped policy loss + MSE value loss."""
        sb, ab, rb, lb, db = b.ex()
        for _ in range(80):
            l = C(logits=self.ac(t(sb))).log_prob(ab)
            r = torch.exp(l - lb)  # ratio of new to old policy probabilities
            # PPO clipped surrogate objective (clip range 0.8..1.2, i.e. eps = 0.2).
            op(-torch.min(r * db, torch.clip(r, 0.8, 1.2) * db).mean(), self.po)
            # Early-stop when approx KL(old || new) = E[logp_old - logp_new]
            # exceeds the target; the original had the sign inverted ((l - lb)),
            # which estimates -KL and triggers on the wrong condition.
            if (lb - l).mean().item() > 0.015: break
        for _ in range(80):
            op(((q(self.cr(t(sb))) - rb) ** 2).mean(), self.vo)


class Buffer:
    """Fixed-size rollout storage for PPO.

    Arrays: sb states, ab actions, rb rewards (rewritten in place to
    discounted returns by cr), vb value estimates, lb log-probs,
    db advantages.
    """

    def __init__(self, bs, sd):
        # bs: capacity (steps per epoch), sd: state dimension
        self.sb, self.ab, self.rb, self.vb, self.lb, self.db = z(bs, sd), z(bs), z(bs), z(bs), z(bs), z(bs)
        # size: capacity, s: write cursor, ss: start index of the open trajectory
        self.size, self.s, self.ss = bs, 0, 0

    def st(self, state, action, reward, log_prob, state_value):
        """Store one transition at the write cursor (wraps at capacity)."""
        i = self.s
        self.sb[i], self.ab[i], self.rb[i], self.lb[i], self.vb[i] = state, action, reward, log_prob, state_value
        self.s = (i + 1) % self.size

    def ex(self):
        """Return (states, actions, returns, log-probs, normalized advantages) as tensors."""
        # Epsilon guards against division by zero when all advantages are equal.
        db = (self.db - np.mean(self.db)) / (np.std(self.db) + 1e-8)
        return [t(a) for a in (self.sb, self.ab, self.rb, self.lb, db)]

    def cr(self, lsv):
        """Close the open trajectory.

        Rewrites its rewards as discounted returns bootstrapped from lsv
        (the value of the last state, or 0 on termination) and fills
        advantages = returns - values.
        """
        # When the cursor has wrapped back to/past the trajectory start, the
        # segment runs to the end of the buffer; the original slice(ss, 0)
        # was empty there, so the final trajectory was never processed.
        end = self.s if self.s > self.ss else self.size
        ret, sl = lsv, slice(self.ss, end)  # renamed from `t`, which shadowed the tensor helper
        for i in reversed(range(self.ss, end)):
            ret = self.rb[i] + 0.99 * ret  # gamma = 0.99
            self.rb[i] = ret
        self.db[sl] = self.rb[sl] - self.vb[sl]
        self.ss = self.s


env = gym.make('CartPole-v1')  # discrete actions; a continuous task (e.g. Hopper-v4) would need a Gaussian policy
ag = Agent(env.observation_space.shape[0], env.action_space.n)

STEPS = 4000   # rollout length per epoch (= buffer capacity)
MAX_EP = 1000  # hard cap on episode length
b = Buffer(STEPS, env.observation_space.shape[0])

for ep in range(1000):
    # seed=0 on every reset -> identical episodes, which makes the
    # convergence of the printed return easy to read.
    s, es = env.reset(seed=0)[0], 0
    for i in range(STEPS):
        a, l, v = ag.act(s)
        # Gymnasium API: keep termination and time-limit truncation separate.
        # The original discarded `truncated`, so CartPole-v1's 500-step
        # truncation never reset the env and the loop kept stepping it.
        ns, r, term, trunc, _ = env.step(a)
        b.st(s, a, r, l, v)
        es, s = es + 1, ns

        if term or trunc or es == MAX_EP or i == STEPS - 1:
            # Bootstrap with the critic's value unless the episode truly
            # terminated (truncation is not a real end of the MDP).
            b.cr((1 - term) * ag.act(ns)[2])
            if b.s == 0: ag.up(b)  # buffer full -> run the PPO update
            if term or trunc or es == MAX_EP:
                s, es = env.reset(seed=0)[0], 0
    # The buffer is never cleared after an update, so rb holds the latest
    # discounted returns and its mean tracks the convergence directly.
    print(f"Reward:{np.mean(b.rb):.3f}")
