import time
import numpy as np
from pynput.keyboard import Listener as KL

from interact import Env
from reward import Reward

import torch
from torch.nn import Tanh as T, Identity as I
from torch.nn import Sequential as S, Linear as L, Conv2d as C, ReLU as R, Flatten as F
from torch.optim import Adam as A
from torch.distributions import Categorical
torch.set_default_device(torch.device("cuda:0"))


def z(bs, s=None):
    """Zero-filled float32 array: shape ``(bs, s)`` when ``s`` is given, else shape ``bs``
    (``bs`` may itself be a full shape tuple).

    Fix: the original tested truthiness (``if s``), so ``s=0`` silently fell back to
    ``np.zeros(bs)``; an explicit ``None`` check keeps ``s=0`` meaningful.
    """
    return np.zeros((bs, s), np.float32) if s is not None else np.zeros(bs, np.float32)
def t(array):
    """Wrap array-like data as a float32 torch tensor (shares memory when possible)."""
    tensor = torch.as_tensor(array, dtype=torch.float32)
    return tensor
def q(array):
    """Drop a trailing size-1 dimension (no-op when the last dim isn't 1)."""
    return array.squeeze(dim=-1)
def op(loss, optimizer):
    """Run one gradient step: zero grads, backprop ``loss``, step ``optimizer``.

    Rewritten from the original one-liner that packed three side effects into a
    list literal and shadowed the function's own name with its parameter; the
    unused ``[None, None, None]`` return value is dropped (callers discard it).
    """
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
def net(s, e):
    """Two-hidden-layer MLP (128 tanh units each) mapping ``s`` inputs to ``e`` outputs."""
    layers = [L(s, 128), T(), L(128, 128), T(), L(128, e), I()]
    return S(*layers)





def start():
    """Block until the backtick (`) key is pressed.

    A background keyboard listener flips the run flag on backtick; the main
    thread polls the flag every 0.2 s and returns once it goes falsy.
    """
    flag = [0, 1]

    def on_press(key):
        # KeyCode objects carry a "char" entry in __dict__; special keys don't,
        # so .get() returning None leaves the flag untouched for them.
        if key.__dict__.get("char") == "`":
            flag.reverse()

    listener = KL(on_press=on_press)
    listener.start()
    while flag[-1]:
        time.sleep(0.2)
    listener.stop()


class Agent(torch.nn.Module):  # changed over to a TD-style algorithm
    """Actor-critic: 6-layer conv encoder feeding a 4-way policy head and a value head,
    trained with a clipped PPO-style objective."""

    def __init__(self):
        super().__init__()
        # Conv stack; with a 1x96x96 input the flatten yields a 128-dim feature
        # vector (96 -> 47 -> 23 -> 11 -> 5 -> 3 -> 1 spatially, 128 channels).
        specs = [(1, 4, 4, 2), (4, 8, 3, 2), (8, 16, 3, 2),
                 (16, 32, 3, 2), (32, 64, 3, 1), (64, 128, 3, 1)]
        stack = []
        for cin, cout, k, stride in specs:
            stack += [C(cin, cout, (k, k), (stride, stride)), R()]
        stack.append(F(start_dim=1))
        self.f = S(*stack)

        self.ac = net(128, 4)   # policy head: 4 action logits
        self.cr = net(128, 1)   # value head
        self.op = A(self.parameters(), lr=1e-3)

    def act(self, s):
        """Sample an action for one state; returns (action, log_prob, state_value) as numpy scalars."""
        feat = self.f(t([s]))
        logits = self.ac(feat)
        print(np.array(logits.cpu().detach()))  # debug: raw policy logits
        value = q(self.cr(feat)).detach()
        dist = Categorical(logits=logits)
        action = dist.sample()
        log_prob = dist.log_prob(action).detach()
        return action.cpu().numpy()[0], log_prob.cpu().numpy()[0], value.cpu().numpy()[0]

    def up(self, b):
        """Run up to 8 clipped policy-gradient passes over buffer ``b``."""
        sb, ab, rb, lb, db = b.ex()
        for _ in range(8):
            feat = self.f(t(sb))
            new_lp = Categorical(logits=self.ac(feat)).log_prob(ab)
            ratio = torch.exp(new_lp - lb)  # new/old policy probability ratio
            policy_loss = -torch.min(ratio * db, torch.clip(ratio, 0.8, 1.2) * db).mean()
            value_loss = 3 * ((q(self.cr(feat)) - rb) ** 2).mean()
            op(policy_loss + value_loss, self.op)
            # Early stop once the policy drifts too far from the sampling policy.
            # NOTE(review): this is (new - old), the opposite sign of the usual
            # approx-KL convention (old - new) — confirm the intent.
            if (new_lp - lb).mean().item() > 0.015:
                break


class Buffer:
    """Fixed-size rollout ring buffer for the PPO-style update.

    Stores (state, action, reward, log_prob, state_value) transitions,
    converts rewards in place to discounted returns via ``cr()``, and hands
    normalized advantages to the learner via ``ex()``.
    """

    def __init__(self, bs):
        # State buffer assumes 1x96x96 observations — matches Agent's conv stack.
        self.sb = np.zeros((bs, 1, 96, 96), np.float32)
        self.ab = np.zeros(bs, np.float32)  # actions taken
        self.rb = np.zeros(bs, np.float32)  # rewards; overwritten with returns by cr()
        self.vb = np.zeros(bs, np.float32)  # critic state values at collection time
        self.lb = np.zeros(bs, np.float32)  # log-probs of the taken actions
        self.db = np.zeros(bs, np.float32)  # advantages (return - value)
        self.size, self.s, self.ss = bs, 0, 0  # capacity, write cursor, start of un-credited span
        self.n = 0  # entries written since the last cr() call, capped at capacity

    def st(self, state, action, reward, log_prob, state_value):
        """Store one transition at the write cursor (wraps around when full)."""
        i = self.s
        self.sb[i], self.ab[i], self.rb[i] = state, action, reward
        self.lb[i], self.vb[i] = log_prob, state_value
        self.s = (i + 1) % self.size
        self.n = min(self.n + 1, self.size)

    def ex(self):
        """Return (states, actions, returns, old log-probs, normalized advantages) as float32 tensors."""
        db = (self.db - np.mean(self.db)) / (np.std(self.db) + 1e-8)
        return [torch.as_tensor(a, dtype=torch.float32) for a in (self.sb, self.ab, self.rb, self.lb, db)]

    def cr(self, lsv):
        """Back up discounted returns (gamma = 0.99) over entries stored since the last call.

        ``lsv`` is the bootstrap value for the state following the last stored
        transition; a non-numeric value (e.g. the agent object at the current
        call site) falls back to a zero bootstrap.

        Bug fixed: the original iterated ``range(self.ss, self.s)``, which is
        empty whenever the write cursor wraps back to the start — the normal
        case here (200 steps into a 200-slot buffer) — so returns and
        advantages were never actually computed.
        """
        if self.n == 0:
            return
        # After a full lap the oldest surviving entry sits at the write cursor.
        start = self.ss if self.n < self.size else self.s
        end = start + self.n
        ret = lsv if isinstance(lsv, (int, float, np.floating)) else 0.0
        for j in reversed(range(start, end)):
            i = j % self.size
            ret = self.rb[i] + 0.99 * ret
            self.rb[i] = ret
        idx = [j % self.size for j in range(start, end)]
        self.db[idx] = self.rb[idx] - self.vb[idx]
        self.ss = self.s
        self.n = 0


start()  # press the backtick (`) key to start

env, ag, b, rm = Env(), Agent(), Buffer(200), Reward()

# Version 1.0: reward-free random exploration
for epoch in range(5000):
    s = env.reset()
    for _ in range(200):  # fixed 200-step rollout per update (an earlier note said 2000)
        a, lp, v = ag.act(s)
        ns, r = env.step(a)  # TODO: reward is temporarily derived from the action itself, to encourage a particular action
        # r = rm.get_reward(ns)
        b.st(s, a, r, lp, v)
        s = ns

    # Bug fix: the original passed the Agent object itself (`b.cr(ag)`) as the
    # bootstrap, but cr() expects the value of the state after the last stored
    # transition. Use 0 at the rollout truncation point.
    b.cr(0.0)
    ag.up(b)
    print(f"Epoch:{epoch} Reward:{np.mean(b.rb):.3f}")  # NOTE: after cr(), rb holds discounted returns, not raw rewards
