import gym
import numpy as np
import torch
from torch import nn
from torch.nn import Sequential as S, Linear as L, Conv2d as C, ReLU as R, Flatten as F
from torch.distributions import Beta as B
from torch.nn.functional import smooth_l1_loss as l1

# Run on the first GPU when available; fall back to CPU so the script still
# starts on machines without CUDA (previously "cuda:0" was hard-coded and
# would raise at import time on CPU-only hosts).
torch.set_default_device(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))

class Car:
    """CarRacing-v2 wrapper: grayscale 4-frame stacks plus action repeat."""

    def __init__(self):
        self.env = gym.make('CarRacing-v2', verbose=0)

    def gray(self, s):
        """Collapse an RGB frame to one channel (ITU-R 601 luma weights),
        scaled by 1/127 so values land roughly in [0, 2]."""
        luma_weights = [0.299, 0.587, 0.114]
        return np.dot(s[..., :], luma_weights) / 127

    def reset(self):
        """Reset the env and return the first frame repeated as a 4-stack."""
        self.rtb = np.zeros(100)  # rolling window of the last 100 frame rewards
        self.s = 0                # frame counter (indexes the rolling window)
        first = self.gray(self.env.reset()[0])
        self.stack = [first, first, first, first]
        return np.array(self.stack)

    def step(self, action):
        """Repeat `action` for up to 10 frames.

        Returns (stacked_state, summed_reward, low_reward_flag, done).
        """
        total, low_reward = 0, False
        for _ in range(10):
            s, r, d, _, _ = self.env.step(action)
            if d:
                r += 100  # completion bonus
            if np.mean(s[:, :, 1]) > 185.0:
                r -= 0.05  # mostly-green frame: presumably off-track, small penalty
            self.rtb[self.s % 100] = r
            if np.mean(self.rtb) <= -0.1:
                low_reward = True  # no recent progress -> caller should cut the episode
            self.s += 1
            total += r
            if low_reward or d:
                break
        # Slide the frame stack: drop oldest, append newest grayscale frame.
        self.stack.pop(0)
        self.stack.append(self.gray(s))
        return np.array(self.stack), total, low_reward, d


def e(s):
    """Allocate a float32 zero array with shape `s`."""
    return np.zeros(s, dtype=np.float32)
def t(array):
    """Convert any array-like to a float32 tensor (zero-copy when possible)."""
    return torch.as_tensor(array, dtype=torch.float32)
def q(array):
    """Drop a trailing singleton dimension, e.g. (N, 1) -> (N,)."""
    return torch.squeeze(array, dim=-1)
def op(l, op):
    """One optimizer step for loss `l`: zero grads, backprop, apply update.

    NOTE(review): the second parameter shadows the function's own name; the
    only caller passes both arguments positionally.
    """
    cleared = op.zero_grad()  # clear stale gradients
    grads = l.backward()      # accumulate fresh gradients
    stepped = op.step()       # apply the parameter update
    return [cleared, grads, stepped]


class Agent(nn.Module):  # subclass nn.Module so parameters are collected automatically
    """PPO actor-critic: shared CNN features, value head, Beta policy head."""

    def __init__(self):
        super().__init__()
        # Feature extractor: 4x96x96 stacked grayscale frames -> 256-dim vector.
        conv1 = C(4, 8, (4, 4), (2, 2))     # -> 8 x 47 x 47
        conv2 = C(8, 16, (3, 3), (2, 2))    # -> 16 x 23 x 23
        conv3 = C(16, 32, (3, 3), (2, 2))   # -> 32 x 11 x 11
        conv4 = C(32, 64, (3, 3), (2, 2))   # -> 64 x 5 x 5
        conv5 = C(64, 128, (3, 3), (1, 1))  # -> 128 x 3 x 3
        conv6 = C(128, 256, (3, 3), (1, 1)) # -> 256 x 1 x 1
        self.f = S(conv1, R(), conv2, R(), conv3, R(), conv4, R(), conv5, R(), conv6, R(), F(start_dim=1))

        self.v = S(L(256, 100), R(), L(100, 1))  # state-value head
        self.lf = S(L(256, 100), R())            # shared policy trunk
        # Beta(alpha, beta) concentration heads; Softplus keeps them positive.
        self.alpha = S(L(100, 3), nn.Softplus())
        self.beta = S(L(100, 3), nn.Softplus())
        # BUG FIX: a fresh Adam used to be built for every minibatch inside
        # up(), which silently reset its moment estimates each step.  One
        # persistent optimizer preserves the adaptive state across updates.
        self.opt = torch.optim.Adam(self.parameters(), lr=1e-3)

    def act(self, s):
        """Sample an action for a single state stack.

        Returns (action, log_prob) as numpy arrays of shape (3,); each action
        component is a Beta sample in (0, 1).
        """
        with torch.no_grad():
            x = torch.as_tensor(s, dtype=torch.float32).unsqueeze(0)  # add batch dim
            trunk = self.lf(self.f(x))
            dist = B(self.alpha(trunk), self.beta(trunk))
            a = torch.squeeze(dist.sample(), dim=0)
            lp = torch.squeeze(dist.log_prob(a), dim=0)
        return a.cpu().numpy(), lp.cpu().numpy()

    def up(self, b):
        """PPO update: 8 passes over the buffer in shuffled minibatches."""
        for _ in range(8):
            for sb, ab, rtb, lb, db in b.ex(128):
                # Compute CNN features once and share them between the policy
                # and value heads (the gradients are identical to two passes).
                feat = self.f(torch.as_tensor(sb, dtype=torch.float32))
                trunk = self.lf(feat)
                dist = B(self.alpha(trunk), self.beta(trunk))
                ratio = torch.exp(dist.log_prob(ab) - lb)  # importance ratio vs. behavior policy
                # Clipped surrogate objective (clip range 0.9 .. 1.1).
                loss = -torch.min(ratio * db, torch.clamp(ratio, 0.9, 1.1) * db).mean()
                # Value loss: smooth L1 against TD returns, weight 2.
                loss = loss + 2. * l1(torch.squeeze(self.v(feat), dim=-1), rtb)
                self.opt.zero_grad()
                loss.backward()
                self.opt.step()


class Buffer:
    """Fixed-capacity on-policy experience buffer for PPO updates."""

    def __init__(self, bs):
        # Pre-allocated storage; states and next states are 4x96x96 stacks.
        self.sb = np.zeros((bs, 4, 96, 96), np.float32)
        self.nsb = np.zeros((bs, 4, 96, 96), np.float32)
        self.ab = np.zeros((bs, 3), np.float32)  # actions
        self.lb = np.zeros((bs, 3), np.float32)  # behavior-policy log-probs
        self.rwb = np.zeros(bs, np.float32)      # raw rewards
        self.rtb = np.zeros(bs, np.float32)      # TD returns (filled by cr)
        self.db = np.zeros(bs, np.float32)       # advantages (filled by cr)
        self.size = bs
        self.ss = 0      # unused; kept for attribute compatibility
        self.s = 0       # next write index
        self.f = False   # unused "full" flag; kept for attribute compatibility

    def st(self, state, action, reward, next_state, log_prob):
        """Write one transition at the cursor, wrapping at capacity."""
        i = self.s
        self.sb[i] = state
        self.ab[i] = action
        self.rwb[i] = reward
        self.nsb[i] = next_state
        self.lb[i] = log_prob
        self.s = (i + 1) % self.size

    def cr(self, agent):
        """Fill one-step TD returns and advantages for the whole buffer."""
        def state_values(batch):
            x = torch.as_tensor(batch, dtype=torch.float32)
            return torch.squeeze(agent.v(agent.f(x)), dim=-1).detach().cpu().numpy()

        sv = state_values(self.sb)
        nsv = state_values(self.nsb)
        self.rtb = self.rwb + 0.99 * nsv          # r + gamma * V(s')
        self.db = np.reshape(self.rtb - sv, (self.size, 1))  # TD-error advantage

    def ex(self, bs):
        """Yield shuffled float32-tensor minibatches; each sample appears once."""
        order = np.random.permutation(self.size)
        for start in range(0, self.size, bs):
            idx = order[start:start + bs]
            yield (torch.as_tensor(self.sb[idx], dtype=torch.float32),
                   torch.as_tensor(self.ab[idx], dtype=torch.float32),
                   torch.as_tensor(self.rtb[idx], dtype=torch.float32),
                   torch.as_tensor(self.lb[idx], dtype=torch.float32),
                   torch.as_tensor(self.db[idx], dtype=torch.float32))


# Training loop: collect on-policy experience; update once every 2000 steps.
env = Car()  # BUG FIX: the environment class defined above is Car, not Env (NameError)
ag = Agent()
b = Buffer(2000)
for epoch in range(5000):
    s, er = env.reset(), 0
    while True:  # episode length is not fixed
        a, lp = ag.act(s)
        # Map Beta samples from (0,1): steering -> (-1,1), gas/brake stay (0,1).
        ns, r, lr, d = env.step(a * np.array([2., 1., 1.]) + np.array([-1., 0., 0.]))
        b.st(s, a, r, ns, lp)
        if b.s == 0:  # write cursor wrapped around -> buffer is full
            b.cr(ag)
            ag.up(b)
        s, er = ns, er + r
        if lr or d:
            break
    print(f"Epoch:{epoch} Reward:{er:.3f}")
