import random
from collections import deque

import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, layers


class Agent:
    """Deep Q-Network (DQN) agent with experience replay and a target network.

    Holds two copies of the Q-network: ``model`` (trained online) and
    ``target_model`` (periodically synced via :meth:`align_target_model`)
    to stabilize the TD targets.
    """

    def __init__(self, env, optimizer, q_network=None, input_shape=None, gamma=1.,
                 epsilon=1., epsilon_decay=0.995, epsilon_min=0.01):
        """Build the online and target Q-networks.

        Args:
            env: Gym-style environment with a discrete ``action_space``.
            optimizer: A ``tf.keras`` optimizer instance.
            q_network: ``tf.keras.Model`` subclass; instantiated as
                ``q_network(action_size)``. Required despite the ``None`` default.
            input_shape: Observation shape (excluding batch dim) used to build
                the networks' weights.
            gamma: Discount factor for future rewards.
            epsilon: Initial exploration rate.
            epsilon_decay: Decay constant for the exponential epsilon schedule.
            epsilon_min: Floor for the exploration rate.

        Raises:
            TypeError: If ``q_network`` is not a ``tf.keras.Model`` subclass.
        """
        self._env = env
        self._action_size = self._env.action_space.n
        self._optimizer = optimizer

        # FIFO experience replay buffer; oldest transitions are evicted first.
        self.replay_buffer = deque(maxlen=10000)

        # Discount factor and exploration-rate schedule parameters.
        self.gamma = gamma
        self.epsilon, self.epsilon_decay, self.epsilon_min = epsilon, epsilon_decay, epsilon_min

        # Validate explicitly instead of `assert` (asserts are stripped under -O,
        # and `issubclass(None, Model)` would raise an opaque TypeError anyway).
        if not (isinstance(q_network, type) and issubclass(q_network, Model)):
            raise TypeError("q_network must be a tf.keras.Model subclass")

        # Build both networks; calling each on a symbolic Input tensor
        # materializes the weights so they can be copied immediately.
        self.model = q_network(self._action_size)
        self.model(layers.Input(input_shape))
        self.target_model = q_network(self._action_size)
        self.target_model(layers.Input(input_shape))
        self.align_target_model()

    def align_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def update_epsilon(self, num_exploration_episodes, i_episodes):
        """Update the exploration rate for the current episode.

        Uses an exponential decay schedule toward ``epsilon_min``:
        ``eps = eps_min + (1 - eps_min) * exp(-decay * episode)``.
        ``num_exploration_episodes`` is kept for interface compatibility with
        the earlier linear schedule but is unused here.
        """
        self.epsilon = self.epsilon_min + (1.0 - self.epsilon_min) * np.exp(-self.epsilon_decay * i_episodes)
        self.epsilon = max(self.epsilon, self.epsilon_min)

    def epsilon_greedy_policy(self, state):
        """Return a random action with probability epsilon, else the greedy one."""
        if random.random() < self.epsilon:
            return self._env.action_space.sample()
        # Greedy branch. NOTE: the original called `.predict(...).numpy()`,
        # which crashes (`predict` already returns an ndarray) and would have
        # returned the full Q-value vector rather than an action. Call the
        # model directly and take the argmax over Q-values instead.
        q_values = self.model(np.expand_dims(state, axis=0))
        return int(np.argmax(q_values.numpy()[0]))

    def store(self, obs, action, reward, next_obs, done):
        """Append one transition to the replay buffer (done stored as 0/1)."""
        self.replay_buffer.append((obs, action, reward, next_obs, 1 if done else 0))

    def train(self, batch_size, loss_object, train_loss):
        """Train the online network on one minibatch via TD learning.

        Target: ``y = r + gamma * max_a' Q_target(s', a')`` (zeroed at episode
        end by the ``1 - done`` mask). Requires the replay buffer to contain at
        least ``batch_size`` transitions (``random.sample`` raises otherwise).
        """
        batch_state, batch_action, batch_reward, batch_next_state, batch_done = zip(
            *random.sample(self.replay_buffer, batch_size))
        batch_state, batch_reward, batch_next_state, batch_done = \
            [np.array(a, dtype=np.float32) for a in [batch_state, batch_reward, batch_next_state, batch_done]]
        batch_action = np.array(batch_action, dtype=np.int32)

        # TD target computed with the (frozen) target network.
        q_value = self.target_model(batch_next_state)
        y = batch_reward + (self.gamma * tf.reduce_max(q_value, axis=1)) * (1 - batch_done)
        self._train_step(batch_action, batch_state, y, loss_object, train_loss)

    @tf.function
    def _train_step(self, batch_action, batch_state, y, loss_object, train_loss):
        """One gradient step minimizing the distance between y and Q(s, a)."""
        with tf.GradientTape() as tape:
            # Select Q(s, a) for the actions actually taken via a one-hot mask.
            q_value = self.model(batch_state) * tf.one_hot(batch_action, depth=self._action_size)
            y_pred = tf.reduce_sum(q_value, axis=1)
            loss = loss_object(y, y_pred)
        # Use trainable_variables (not .variables): non-trainable variables
        # (e.g. BatchNorm moving statistics) have no gradient and would break
        # apply_gradients with None entries.
        grads = tape.gradient(loss, self.model.trainable_variables)
        self._optimizer.apply_gradients(grads_and_vars=zip(grads, self.model.trainable_variables))
        train_loss(loss)

    def save_model(self, export_dir):
        """Export the online network in SavedModel format to ``export_dir``."""
        tf.saved_model.save(self.model, export_dir)