import os
import time
import collections

import cv2
import numpy as np
import torch
from torch.nn import Sequential as S, Linear as L, Conv2d as C, ReLU as R, Flatten as F
from torch.nn.functional import smooth_l1_loss as l1

import gym_super_mario_bros
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT

# Prefer the first CUDA GPU, but fall back to CPU so the script still runs
# (slowly) on machines without CUDA instead of crashing at import time.
torch.set_default_device(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))


def t(array):
    """Wrap an array-like as a float32 torch tensor (shares memory when possible)."""
    return torch.as_tensor(array, dtype=torch.float32)
def z(bs, s=None):
    """Float32 zeros: z(shape) -> np.zeros(shape); z(n, m) -> np.zeros((n, m)) when m is truthy."""
    shape = (bs, s) if s else bs
    return np.zeros(shape, np.float32)
def op(loss, optimizer):
    """One gradient step: zero grads, backprop `loss`, apply the update.

    Returns the three calls' results (all None) in order, matching the
    original list-expression form.
    """
    results = []
    results.append(optimizer.zero_grad())
    results.append(loss.backward())
    results.append(optimizer.step())
    return results


def gray(frame):
    """Convert a raw 240x256 RGB frame to a (1, 84, 84) uint8 grayscale image.

    Luma weights (0.299/0.587/0.114) follow the usual RGB->gray conversion;
    the resize to 84x110 followed by the [18:102] row crop yields 84x84.
    """
    img = np.reshape(frame, [240, 256, 3]).astype(np.float32)
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    luma = r * 0.299 + g * 0.587 + b * 0.114
    resized = cv2.resize(luma, (84, 110), interpolation=cv2.INTER_AREA)
    cropped = resized[18:102, :]
    return np.reshape(cropped.astype(np.uint8), [1, 84, 84])


class Wrapper:
    """Mario env wrapper: 4-step action repeat, 2-frame max, 4-frame gray stack.

    Observations are (4, 84, 84) float32 stacks of grayscale frames scaled to
    [0, 1]; each slot is the pixel-wise max over the last two raw frames
    (a common trick for NES sprite flicker).
    """

    def __init__(self):
        self.env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0').env
        self.state = z((4, 84, 84))               # rolling 4-frame gray stack
        self.stack = collections.deque(maxlen=2)  # last two raw frames

    def ob(self, frame):
        # Shift the stack one slot left and append the newest grayscale frame.
        self.state[:-1] = self.state[1:]
        self.state[-1] = gray(frame)
        return np.array(self.state, np.float32) / 255.0

    def step(self, a):
        total, done = 0.0, False
        for _ in range(4):  # repeat the chosen action for 4 raw frames
            raw, reward, done, _ = self.env.step(a)
            self.stack.append(raw)
            total += reward
            if done:
                break
        merged = np.max(np.stack(self.stack), axis=0)
        return self.ob(merged), total, done

    def reset(self):
        self.state = z((4, 84, 84))
        raw = self.env.reset()
        self.stack.clear()
        self.stack.append(raw)
        return self.ob(raw)


class Agent:
    """DQN agent with online/target networks and epsilon-greedy exploration (7 actions)."""

    def __init__(self):
        # Online net, target net, and exploration rate (epsilon, decayed in up()).
        self.n, self.tn, self.exp = self.net(), self.net(), 1.0
        # Start the target net in sync with the online net; the original left it
        # as an independent random init until the first tup() call.
        self.tn.load_state_dict(self.n.state_dict())
        self.op = torch.optim.Adam(self.n.parameters(), lr=0.00025)

    def net(self):
        """Build the Q-network: 3 conv layers + 2 linear layers, (4, 84, 84) in, 7 Q-values out."""
        # Note: locals renamed from l1/l2/l3 — the old `l1` shadowed the
        # module-level smooth_l1_loss alias of the same name.
        nn = torch.nn
        return nn.Sequential(
            nn.Conv2d(4, 32, (8, 8), (4, 4)), nn.ReLU(),
            nn.Conv2d(32, 64, (4, 4), (2, 2)), nn.ReLU(),
            nn.Conv2d(64, 64, (3, 3), (1, 1)), nn.ReLU(),
            nn.Flatten(start_dim=1),
            nn.Linear(3136, 512), nn.ReLU(),
            nn.Linear(512, 7),
        )

    def act(self, s):
        """Epsilon-greedy action: random with probability self.exp, else argmax Q."""
        if np.random.rand() < self.exp:
            return np.random.randint(7)
        return torch.argmax(self.n(t(s).unsqueeze(0))).item()

    def up(self, buffer):
        """One DQN update from a sampled minibatch; decays epsilon toward 0.03."""
        sb, ab, rb, nsb, db = buffer.ex()
        q = self.n(sb).gather(dim=1, index=ab.unsqueeze(1)).squeeze(dim=1)
        # The TD target must be a constant w.r.t. the loss: without no_grad the
        # original backpropagated through the target network as well.
        # NOTE(review): no discount factor (gamma) is applied to the bootstrap
        # term — Q-targets are undiscounted; confirm this is intended.
        with torch.no_grad():
            q_target = rb + (1 - db) * torch.max(self.tn(nsb), dim=1).values
        op(l1(q, q_target), self.op)
        self.exp = max(0.99 * self.exp, 0.03)

    def tup(self):
        """Copy the online network's weights into the target network."""
        self.tn.load_state_dict(self.n.state_dict())


class Buffer:
    """Fixed-capacity circular replay buffer; samples minibatches of 32 with replacement."""

    def __init__(self, bs):
        self.sb = z((bs, 4, 84, 84))       # states
        self.nsb = z((bs, 4, 84, 84))      # next states
        self.rb = z(bs)                    # rewards
        self.db = z(bs)                    # done flags (0.0 / 1.0)
        self.ab = np.zeros(bs, np.int64)   # actions
        self.size = bs                     # capacity
        self.ss = 0                        # total transitions ever stored
        self.s = 0                         # next write index (wraps at capacity)

    def store(self, state, action, reward, next_state, done):
        i = self.s
        self.sb[i] = state
        self.ab[i] = action
        self.rb[i] = reward
        self.nsb[i] = next_state
        self.db[i] = done
        self.s = (i + 1) % self.size  # overwrite oldest once full
        self.ss += 1

    def ex(self):
        # Uniform sample of 32 indices from the filled portion of the buffer.
        idx = np.random.randint(0, min(self.ss, self.size), size=32)
        acts = torch.as_tensor(self.ab[idx], dtype=torch.int64)
        return t(self.sb[idx]), acts, t(self.rb[idx]), t(self.nsb[idx]), t(self.db[idx])


env = JoypadSpace(Wrapper(), SIMPLE_MOVEMENT)
ag, b, train = Agent(), Buffer(30000), True
if os.path.exists("mario.pt"):
    ag.n.load_state_dict(torch.load("mario.pt"))
    ag.tup()  # the checkpoint must also reach the target net, or TD targets use random weights

if train:
    for e in range(1000):
        s = env.reset()
        while True:
            a = ag.act(s)
            ns, r, d = env.step(a)
            b.store(s, a, r, ns, d)
            if b.ss >= 32: ag.up(b)
            if b.ss % 5000 == 0: ag.tup()  # periodic target-network refresh
            if d: break
            s = ns
        # Mean reward over the transitions currently held in the buffer;
        # min(...) keeps the denominator correct once the circular buffer wraps
        # (the original kept dividing by the ever-growing store count).
        print(f"Epoch:{e} Reward:{np.sum(b.rb) / min(b.ss, b.size):.3f}")
        if (e + 1) % 100 == 0: torch.save(ag.n.state_dict(), "mario.pt")
else:
    # Evaluation must act greedily: the original left exp at its initial 1.0,
    # so the loaded policy was never used and every action was random.
    ag.exp = 0.0
    state = env.reset()
    while True:
        # env.env.env.render()
        time.sleep(0.02)
        action = ag.act(state)
        next_state, _, done = env.step(action)
        if done: break
        state = next_state
