# Deep Reinforcement Learning (Chinese edition) — DQN example on Atari Breakout
from os import sync
import random
import time
from typing import Tuple
import gym
import numpy as np
import tensorflow as tf
from gym.core import Env
# Sanity-check the environment: play one episode with a uniformly random policy.
env = gym.make("BreakoutNoFrameskip-v4")

o = env.reset()
episode_over = False

while not episode_over:
    env.render()
    a = env.action_space.sample()  # random action from the action space
    o, r, episode_over, _ = env.step(a)

env.close()

class TimeLimit(gym.Wrapper):
    """Episode-length wrapper: forces `done` once a step budget is exhausted.

    When the limit triggers, ``info['TimeLimit.truncated']`` is set to True so
    callers can distinguish a time-out from a genuine terminal state.
    """

    def __init__(self, env: Env, max_episode_steps=None):
        """Wrap `env`; ``max_episode_steps=None`` disables the limit."""
        super().__init__(env)
        self._max_episode_steps = max_episode_steps
        self._elapsed_steps = 0

    def step(self, action):
        """Step the wrapped env, truncating when the step budget is spent."""
        o, r, done, info = self.env.step(action)
        self._elapsed_steps += 1
        # Guard: with max_episode_steps=None the original `>=` comparison
        # raised TypeError; treat None as "no limit".
        if (self._max_episode_steps is not None
                and self._elapsed_steps >= self._max_episode_steps):
            done = True
            info['TimeLimit.truncated'] = True
        return o, r, done, info

    def reset(self, **kwargs):
        """Reset the step counter along with the wrapped env."""
        self._elapsed_steps = 0
        # BUG FIX: original referenced `self.envv` (AttributeError on reset).
        return self.env.reset(**kwargs)

class QFunc(tf.keras.Model):
    """Atari Q-network: three conv layers -> flatten -> two dense layers.

    Maps a batch of pixel observations to one Q-value per action.
    NOTE(review): relies on a module-level `action_dim` — confirm it is
    defined before this class is instantiated.
    """

    def __init__(self, name, *args, **kwargs):
        super().__init__(name=name)
        # Classic DQN convolutional torso (attribute order preserved so the
        # trainable-weight ordering is unchanged).
        self.conv1 = tf.keras.layers.Conv2D(
            32, kernel_size=(8, 8), strides=(4, 4),
            padding='valid', activation='relu')
        self.conv2 = tf.keras.layers.Conv2D(
            64, kernel_size=(4, 4), strides=(2, 2),
            padding='valid', activation='relu')
        self.conv3 = tf.keras.layers.Conv2D(
            64, kernel_size=(3, 3), strides=(1, 1),
            padding='valid', activation='relu')
        self.flat = tf.keras.layers.Flatten()
        self.fc1 = tf.keras.layers.Dense(512, activation='relu')
        self.fc2 = tf.keras.layers.Dense(action_dim, activation='linear')

    def call(self, pixels, **kwargs):
        """Return Q-values; `pixels` are rescaled from [0, 255] to [0, 1]."""
        x = tf.divide(tf.cast(pixels, tf.float32), tf.constant(255.0))
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return self.fc2(self.fc1(self.flat(x)))

class DQN():
    """Deep Q-Network agent: an online net, a lagged target net, Adam updates.

    NOTE(review): relies on module-level hyperparameters defined elsewhere
    (`lr`, `clipnorm`, `epsilon`, `action_dim`, `reward_gamma`, `huber_loss`)
    — confirm they exist before instantiation.
    """

    def __init__(self, target_sync_freq=100) -> None:
        """``target_sync_freq``: train steps between target-network syncs."""
        self.qnet = QFunc('q')
        self.targetqnet = QFunc('targetq')
        # BUG FIX: the original called `sync(...)`, but the only `sync` in
        # scope is `os.sync` (flushes filesystem buffers, takes no arguments,
        # so this raised TypeError). The intent is to copy the online
        # weights into the target network.
        self._sync_weights()
        self.niter = 0
        self._target_sync_freq = target_sync_freq
        self.optimizer = tf.optimizers.Adam(lr, epsilon=1e-5, clipnorm=clipnorm)

    def _sync_weights(self):
        """Copy each trainable weight of the online net into the target net.

        No-op before the nets are built (subclassed Keras models have no
        weights until their first call).
        """
        for var, var_tar in zip(self.qnet.trainable_weights,
                                self.targetqnet.trainable_weights):
            var_tar.assign(var)

    @tf.function
    def _qvalues_func(self, obv):
        """Graph-compiled forward pass of the online network."""
        return self.qnet(obv)

    def get_action(self, obv):
        """Return an epsilon-greedy action for a single observation."""
        eps = epsilon(self.niter)
        if random.random() < eps:
            # Uniform random exploration.
            return int(random.random() * action_dim)
        obv = np.expand_dims(obv, 0).astype('float32')  # add batch dimension
        return self._qvalues_func(obv).numpy().argmax(1)[0]

    def train(self, b_o, b_a, b_r, b_o_, b_d):
        """One gradient step on a transition batch; periodically sync target."""
        self._train_func(b_o, b_a, b_r, b_o_, b_d)

        self.niter += 1
        # BUG FIX: the original tested `if self.niter:` (truthy on every
        # step), so the target network never lagged the online network.
        if self.niter % self._target_sync_freq == 0:
            self._sync_weights()

    def _train_func(self, b_o, b_a, b_r, b_o_, b_d):
        """Compute Huber loss over TD errors and apply a clipped Adam update."""
        with tf.GradientTape() as tape:
            td_errors = self._tderror_func(b_o, b_a, b_r, b_o_, b_d)
            loss = tf.reduce_mean(huber_loss(td_errors))

        grad = tape.gradient(loss, self.qnet.trainable_weights)
        self.optimizer.apply_gradients(zip(grad, self.qnet.trainable_weights))

        return td_errors

    @tf.function
    def _tderror_func(self, b_o, b_a, b_r, b_o_, b_d):
        """TD error: Q(s, a) - (r + gamma * max_a' Q_target(s', a'))."""
        # Bootstrap term is zeroed on terminal transitions (b_d == 1).
        b_q_ = (1 - b_d) * tf.reduce_max(self.targetqnet(b_o_), 1)
        b_q = tf.reduce_sum(self.qnet(b_o) * tf.one_hot(b_a, action_dim), 1)

        return b_q - (b_r + reward_gamma * b_q_)

# ---- main training loop -----------------------------------------------------
# NOTE(review): relies on names defined elsewhere in the file (`ReplayBuffer`,
# `buffer_size`, `number_time_steps`, `warm_start`, `batch_size`) — confirm.
dqn = DQN()
buffer = ReplayBuffer(buffer_size)

o = env.reset()
nepisode = 0
t = time.time()
# BUG FIX: `reward`/`length` were unbound until the first episode finished,
# so the original print raised NameError; initialize them up front.
reward, length = 0.0, 0

for i in range(1, number_time_steps + 1):
    a = dqn.get_action(o)
    # Execute the action and feed the transition to the replay buffer.
    # Naming convention: a trailing `_` means "next step" (o_ = next obs).
    o_, r, done, info = env.step(a)
    buffer.add(o, a, r, o_, done)

    # (Dropped the original's redundant `and i` — i is always >= 1 here.)
    if i >= warm_start:
        # Sample a minibatch and take one gradient step.
        transitions = buffer.sample(batch_size)
        dqn.train(*transitions)

    o = env.reset() if done else o_

    # 'episode' in info carries the real (unwrapped) episode statistics.
    if info.get('episode'):
        nepisode += 1
        reward, length = info['episode']['r'], info['episode']['l']
        # BUG FIX: the print sat outside this `if`, logging every single
        # time step; report only when an episode completes.
        print('Time steps so far: {}, episode so far: {}, '
              'episode reward: {:.4f}, episode length: {}'
              .format(i, nepisode, reward, length))