from typing import Tuple
import tensorflow as tf
import numpy as np
import os, warnings 

if os.path.exists(r'./fmu_func.py'):
    from fmu_func import dishWasher
    from utils.anns import actor, critic
    from utils.action_noise import OUActionNoise
    from utils.buffer import Buffer
    from utils.params import get_params
else:
    from .fmu_func import dishWasher
    from .utils.anns import actor, critic
    from .utils.action_noise import OUActionNoise
    from .utils.buffer import Buffer
    from .utils.params import get_params
    
# Resolve the model directory: prefer the sibling ../model/ layout (running
# from inside the package), otherwise fall back to ./model/ in the CWD.
DIR_PATH = r'../model/' if os.path.exists(r'../model/') else r'./model/'

class DDPG(object):
    """
        Main class implementation for Deep Deterministic Policy Gradient
        (DDPG) by Lillicrap et al. 2015.

        The environment, networks, noise process and replay buffer are all
        constructed internally (see ``__init__``); only the problem sizes and
        hyper-parameters are supplied by the caller.

        Args:

        num_states (tuple): shape of a state observation.
        num_actions (int): number of action dimensions.
        actor_lr (float): learning rate of the actor network.
        critic_lr (float): learning rate of the critic network.
        batch_size (int): mini-batch size sampled from the replay buffer.
        buffer_capcaity (int): replay-buffer capacity. NOTE: the misspelled
            name is kept intentionally for backward compatibility with
            existing keyword callers.
        sigma (float): scale of the Ornstein-Uhlenbeck exploration noise.
        gamma (float): forgetting (discount) factor.
        tau (float): smoothing constant for target networks update.
        freq_iter (int): experience is recorded (and learning triggered)
            only every ``freq_iter``-th environment step.
        max_prev_iter (int): number of most recent (state, action) pairs
            re-recorded against the newest state.
        inner_epoch_min (int): minimum number of SGD mini-batches per call
            to ``learn``.
    """

    def __init__(self, num_states, num_actions,
                 actor_lr=1e-4, critic_lr=1e-4,
                 batch_size=128, buffer_capcaity=640,
                 sigma=1e-5, gamma=0.99, tau=1e-4, freq_iter=3,
                 max_prev_iter=5, inner_epoch_min=3) -> None:

        self.env = dishWasher()

        self._num_actions = num_actions
        self._num_states = num_states

        # Actor network, its target copy, and the best weights seen so far.
        self._actor_lr = actor_lr
        self.actor_model = actor(num_states, num_actions, actor_lr)
        self.target_actor = tf.keras.models.clone_model(self.actor_model)
        self.best_actor_weights = self.actor_model.get_weights()

        # Critic network, its target copy, and the best weights seen so far.
        self._critic_lr = critic_lr
        self.critic_model = critic(num_states, num_actions, critic_lr)
        self.target_critic = tf.keras.models.clone_model(self.critic_model)
        self.best_critic_weights = self.critic_model.get_weights()

        self.buffer = Buffer(num_states, num_actions, batch_size=batch_size,
                             buffer_capacity=int(buffer_capcaity))
        self.act_noise = OUActionNoise(mean=np.zeros(num_actions),
                                       sigma=float(sigma) * np.ones(num_actions))
        # Actions are produced in [0, 1] and scaled by this bound.
        self.act_upper_bound = np.ones(num_actions, dtype="float32")

        self.gamma = gamma
        self.tau = tau

        # Sanitize the iteration hyper-parameters: each must be >= 1.
        self._max_prev_iter = max(1, int(round(max_prev_iter)))
        self._inner_epoch_min = max(1, int(round(inner_epoch_min)))
        self._freq_iter = max(1, int(round(freq_iter)))
        self.reset()

    def policy(self, state, training=True):
        """
            Policy function to sample an action.

            Args:

            state (tuple): state observation used to determine next action.
            training (bool): whether or not the action should be noisy. When
                False, the (smoother) target actor is used instead.

            Returns:
            action (np.array): action sampled from the policy, clipped to
                [0, 1] and scaled by ``act_upper_bound``.
        """
        # The network expects a batch of states, so we reshape the state
        # if it is not already a batch. (buffer.num_states is assumed to be
        # the state shape tuple — see __init__.)
        if state.ndim <= len(self.buffer.num_states):
            state = state[None, ...]
        # If training, add noise to the action.
        if training:
            sampled_action = tf.squeeze(self.actor_model(state))
            noise = self.act_noise()
            sampled_action = sampled_action.numpy() + noise
        else:
            sampled_action = tf.squeeze(self.target_actor(state))
        # Clip the action to the environment's action space.
        sampled_action = np.clip(sampled_action, 0, 1)
        sampled_action *= self.act_upper_bound

        # If the action is a scalar, we reshape it.
        if not sampled_action.shape:
            action = [sampled_action]
        else:
            action = sampled_action

        return action

    def _perturb_action(self, action):
        """
            Apply a random deflection to ``action`` for extra exploration.

            The action is deflected away from a random direction (removing a
            component proportional to their alignment); if either vector is
            (numerically) zero, a tiny random jitter is applied instead.

            Args:

            action (np.array): action to perturb (modified in place for
                ndarray inputs, as before the refactor).

            Returns:
            action (np.array): the perturbed action.
        """
        direct = np.random.uniform(-0.5, 0.5, self._num_actions)
        div_base = np.linalg.norm(direct) * np.linalg.norm(action)
        if div_base > 1e-9:
            action -= action * np.sum(action * direct) / div_base
        else:
            action -= direct * np.random.uniform(-1e-3, 1e-3)
        return action

    @tf.function
    def update_target(self, target_weights, weights, tau):
        """
            Update the target networks using the soft update rule.

            Args:

            target_weights (list): list of target network weights.
            weights (list): list of network weights.
            tau (float): smoothing constant for target networks update.
        """
        for a, b in zip(target_weights, weights):
            a.assign(b * tau + a * (1 - tau))

    @tf.function
    def sgd_on_batch(self, state_batch, action_batch, reward_batch,
                     next_state_batch, clip_grad=True, grad_norm=10):
        """
            Perform a single step of gradient descent on a batch of data.

            Args:

            state_batch (np.array): batch of states.
            action_batch (np.array): batch of actions.
            reward_batch (np.array): batch of rewards.
            next_state_batch (np.array): batch of next states.
            clip_grad (bool): whether or not to clip the gradient.
            grad_norm (float): gradient norm to clip to.

            Returns:
            (crt_norm, act_norm, critic_loss, actor_loss): critic/actor
                global gradient norms (for logging) and losses.
        """
        # * Update the critic network.
        with tf.GradientTape() as tape:
            target_actions = self.target_actor(next_state_batch, training=True)
            target_actions = tf.multiply(target_actions, self.act_upper_bound)
            # Bellman equation using target networks.
            y = reward_batch + self.gamma * self.target_critic(
                [next_state_batch, target_actions], training=True
            )
            critic_value = self.critic_model(
                [state_batch, action_batch], training=True)
            critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
        # The critic loss is calculated using the target networks and Bellman
        # equation. This is called the mean-squared Bellman error (MSBE)
        # function.
        critic_grad = tape.gradient(
            critic_loss, self.critic_model.trainable_variables)

        # Clip the gradient if necessary. We also calculate the gradient norm
        # for logging purposes.
        # BUGFIX: honour the `clip_grad`/`grad_norm` parameters — previously
        # they were silently ignored in favour of self.clip_grad/
        # self.grad_norm (callers such as learn() pass the same values, so
        # existing behavior is unchanged).
        if clip_grad:
            critic_grad, crt_norm = tf.clip_by_global_norm(critic_grad,
                                                           grad_norm)
        else:
            # clip_by_global_norm is used only to obtain the global norm; the
            # clipped gradients are discarded.
            _, crt_norm = tf.clip_by_global_norm(critic_grad, 1)
        self.critic_model.optimizer.apply_gradients(
            zip(critic_grad, self.critic_model.trainable_variables)
        )
        # * Update the actor network.
        with tf.GradientTape() as tape:
            actions = self.actor_model(state_batch, training=True)
            actions = tf.multiply(actions, self.act_upper_bound)
            critic_value = self.critic_model(
                [state_batch, actions], training=True)
            # The actor loss is the negative of the critic value.
            # We want to maximize the critic value, which is equivalent to
            # minimizing the negative of the critic value.
            actor_loss = -tf.math.reduce_mean(critic_value)

        actor_grad = tape.gradient(
            actor_loss, self.actor_model.trainable_variables)
        if clip_grad:
            actor_grad, act_norm = tf.clip_by_global_norm(actor_grad,
                                                          grad_norm)
        else:
            _, act_norm = tf.clip_by_global_norm(actor_grad, 1)
        self.actor_model.optimizer.apply_gradients(
            zip(actor_grad, self.actor_model.trainable_variables)
        )
        return crt_norm, act_norm, critic_loss, actor_loss

    def fit(self, steps, max_steps_per_ep=np.inf, log_freq=25,
            warm_up=50, verbose=1, clip_grad=True, learn_freq=1,
            eval_episodes=10, performance_th=np.inf, grad_norm=5,
            checkpoints=False, checkpoint_path=DIR_PATH,
            keep_best: bool = True):
        """
            Train the agent.

            Args:

            steps (int): number of steps to train the agent for.
            max_steps_per_ep (int): maximum number of steps per episode.
            log_freq (int): logging frequency in episodes. At this frequency,
                the target networks are used to evaluate the agent's
                performance for eval_episodes episodes.
            warm_up (int): number of steps to warm up the agent before
                training.
            verbose (int): verbosity level.
            clip_grad (bool): whether or not to clip the gradient.
            learn_freq (int): frequency of learning in number of steps. The
                agent learns every learn_freq steps.
            eval_episodes (int): number of episodes to evaluate the agent
                for.
            performance_th (float): threshold for the performance of the
                agent. If the agent's performance is above this threshold,
                the training is stopped.
            grad_norm (float): gradient norm to clip to.
            checkpoints (bool): whether or not to save checkpoints for the best
                models.
            checkpoint_path (str): path to save checkpoints.
            keep_best (bool): whether or not to keep the best performing
                model. If true, the best performing model is restored.

            Returns:
            hist (dict): evaluation history lists (mean/max/min/std returns
                and mean episode lengths), one entry per logging point.
        """

        self.clip_grad = clip_grad
        self.grad_norm = grad_norm

        # To store reward history of each episode. Each list is large enough
        # to record information from all episodes within the interval of
        # logging.
        self.crt_grad_norm_list = [0] + [np.nan for _ in range(log_freq - 1)]
        self.act_grad_norm_list = [0] + [np.nan for _ in range(log_freq - 1)]
        self.actor_loss_list = [0] + [np.nan for _ in range(log_freq - 1)]
        self.critic_loss_list = [0] + [np.nan for _ in range(log_freq - 1)]
        self.eval_episodes = eval_episodes

        # Steps taken considering all episodes.
        steps_taken = 0
        episode = 0
        while steps_taken < steps:

            prev_state, _ = self.env.reset()
            done = False
            ep_steps = 0
            inner_stock = []
            while not done:
                # Mix a scripted ("playback") action with the learned policy,
                # occasionally perturbing either for extra exploration. The
                # two random draws occur in the same order as the original
                # duplicated branches, so seeded runs are unaffected.
                if np.random.uniform(0, 1) > 0.3:
                    action = self.env.pb_action()
                else:
                    action = self.policy(prev_state)
                if np.random.uniform(0, 1) > 0.75:
                    action = self._perturb_action(action)
                # Terminated is true if the episode ends naturally, while
                # truncated is true if the episode is truncated due to
                # max_steps_per_ep.
                state, reward, terminated, truncated, action = self.env.step(action)
                done = terminated or truncated
                steps_taken += 1
                ep_steps += 1
                if steps_taken >= steps:
                    break
                if steps_taken % self._freq_iter == 0:
                    # Re-record the most recent (state, action) pairs against
                    # the newest state, accumulating actions along the way.
                    _action = np.zeros(len(action), dtype="float32")
                    inner_stock.append([prev_state, action])
                    for _pre_state, _d_action in inner_stock[:-self._max_prev_iter:-1]:
                        _action += _d_action
                        _reward = self.env.cal_reward(_pre_state, state)
                        self.buffer.record((_pre_state, _action, _reward, state))
                    if (steps_taken > warm_up) and (steps_taken % learn_freq == 0):
                        self.learn()

                prev_state = state
                if ep_steps > max_steps_per_ep:
                    done = True
                    break
            # Post-episode bookkeeping: record the residual action against a
            # terminal all-zero state.
            inner_stock.append([prev_state, self.env._true_theta - self.env._theta])
            state = np.zeros(self._num_states, dtype="float32")
            # BUGFIX: start from a fresh accumulator here. Previously the
            # in-loop `_action` was reused, carrying stale accumulated values
            # into these records — and raising NameError when the step budget
            # ran out on the very first step (the break skipped its
            # initialization).
            _action = np.zeros(self._num_actions, dtype="float32")
            for _pre_state, _d_action in inner_stock[:-self._max_prev_iter:-1]:
                _action += _d_action
                _reward = self.env.cal_reward(_pre_state, state)
                self.buffer.record((_pre_state, _action, _reward, state))
            episode += 1
            self.act_noise.reset()
            if (episode % log_freq == 0) and (episode != 0):
                if verbose > 0:
                    print(f"\nEpisode: {episode}")
                score = self.log_optimization_info(verbose)
                # Update the best actor and critic weights if the score is
                # better than the best score.
                if score >= self.best_avg_reward:
                    self.best_avg_reward = score
                    self.best_actor_weights = self.target_actor.get_weights()
                    self.best_critic_weights = self.target_critic.get_weights()

                    if checkpoints:
                        self.save_actor_weights(checkpoint_path +
                                                "checkpoint_actor.h5")
                        self.save_critic_weights(checkpoint_path +
                                                 "checkpoint_critic.h5")
                if score > performance_th:
                    if verbose > 0:
                        print("\nPerformance goal reached!! :)")
                    break
        # Restore the best actor and critic weights.
        if keep_best:
            self.target_actor.set_weights(self.best_actor_weights)
            self.target_critic.set_weights(self.best_critic_weights)
        return self.hist

    def learn(self) -> None:
        """
            Apply SGD to a batch of experiences, then soft-update the target
            networks.
        """

        crt_grad_norms, act_grad_norms, crt_losses, act_losses = [], [], [], []
        # Run at least `_inner_epoch_min` mini-batches, scaling up with the
        # amount of experience currently held by the buffer.
        n_batches = max(self._inner_epoch_min,
                        int(round(self.buffer.length / self.buffer.batch_size * 0.5)))
        for _ in range(n_batches):
            state_batch, action_batch, reward_batch, next_state_batch = self.buffer.read()
            _crt_grad_norm, _act_grad_norm, _crt_loss, _act_loss = \
                self.sgd_on_batch(state_batch, action_batch,
                                  reward_batch, next_state_batch,
                                  self.clip_grad, self.grad_norm)
            crt_grad_norms.append(_crt_grad_norm)
            act_grad_norms.append(_act_grad_norm)
            crt_losses.append(_crt_loss)
            act_losses.append(_act_loss)
        # Update the lists for logging. Only the last log_freq values are
        # stored.
        crt_grad_norm = np.mean(crt_grad_norms)
        act_grad_norm = np.mean(act_grad_norms)
        crt_loss = np.mean(crt_losses)
        act_loss = np.mean(act_losses)
        self.manage_optimization_lists(crt_grad_norm,
                                       act_grad_norm, crt_loss,
                                       act_loss)
        self.update_target(self.target_actor.variables,
                           self.actor_model.variables, self.tau)
        self.update_target(self.target_critic.variables,
                           self.critic_model.variables, self.tau)

    def evaluate(self, episodes: int = 5, max_step_inner: int = 50,
                 verbose: bool = True) -> Tuple[float, float, float,
                                                float, float]:
        """
        Evaluate the RL agent with the (noise-free) target policy. This
        function is not adapted to multiple envs.

        Args:

        episodes (int): number of episodes to evaluate.
        max_step_inner (int): per-episode step cap.
        verbose (bool): whether to announce the evaluation.

        Returns:
        (mean, max, min, std, mean_len): mean / max / min / standard
            deviation of the episode returns and the mean episode length.
        """

        episode_rewards_list = []
        episode_len_list = []
        if verbose:
            print("Evaluating policy...")
        for _ in range(episodes):
            episode_rewards = []
            steps = 0
            done = False
            obs, _ = self.env.reset()
            action = self.policy(obs.reshape(1, *self.buffer.num_states),
                                 training=False)
            while not done:
                state, reward, terminated, truncated, _ = self.env.step(action)
                action = self.policy(state)
                done = terminated or truncated
                steps += 1
                episode_rewards.append(reward)
                if steps > max_step_inner:
                    break
            episode_rewards_list.append(sum(episode_rewards))
            episode_len_list.append(steps)

        mean_episode_reward = np.mean(episode_rewards_list)
        max_episode_reward = np.max(episode_rewards_list)
        min_episode_reward = np.min(episode_rewards_list)
        std_episode_reward = np.std(episode_rewards_list)
        mean_episode_len = np.mean(episode_len_list)
        return mean_episode_reward, max_episode_reward, min_episode_reward, \
               std_episode_reward, mean_episode_len

    def manage_optimization_lists(self, crt_grad_norm, act_grad_norm, crt_loss,
                                  act_loss):
        """
        Update the lists for logging. Only the last log_freq values are stored
        (append at the tail, drop the head).
        """
        self.crt_grad_norm_list.append(crt_grad_norm)
        self.crt_grad_norm_list.pop(0)
        self.act_grad_norm_list.append(act_grad_norm)
        self.act_grad_norm_list.pop(0)
        self.critic_loss_list.append(crt_loss)
        self.critic_loss_list.pop(0)
        self.actor_loss_list.append(act_loss)
        self.actor_loss_list.pop(0)

    def log_optimization_info(self, verbose):
        """
        Evaluate the agent, log the optimization info and print it if verbose
        is true.

        Returns:
        mean_return (float): mean evaluation return (used by fit() as the
            score for best-model tracking).
        """
        mean_return, max_return, min_return, std_return, mean_len = self.evaluate(
            self.eval_episodes)
        # nanmean: slots not yet filled this logging window hold np.nan.
        mean_crt_grad = np.nanmean(self.crt_grad_norm_list)
        mean_act_grad = np.nanmean(self.act_grad_norm_list)
        mean_crt_loss = np.nanmean(self.critic_loss_list)
        mean_act_loss = np.nanmean(self.actor_loss_list)
        self.hist['mean_returns'].append(mean_return)
        self.hist['max_returns'].append(max_return)
        self.hist['min_returns'].append(min_return)
        self.hist['std_returns'].append(std_return)
        self.hist['mean_lens'].append(mean_len)
        if verbose > 0:
            print("\n-----------------------------------\n" +
                       f"\nMean return: {mean_return:.2e}" +
                       f"\nMax return: {max_return:.2e}" +
                       f"\nMin return: {min_return:.2e}" +
                       f"\nStd return: {std_return:.2e}" +
                       f"\nMean length: {mean_len:.2e}" +
                       "\nCritic gradient norm: " +
                       f"{mean_crt_grad:.2e}" +
                       "\nActor gradient norm: " +
                       f"{mean_act_grad:.2e}" +
                       "\nCritic loss: " +
                       f"{mean_crt_loss:.2e}" +
                       "\nActor loss: " +
                       f"{mean_act_loss:.2e}" +
                       "\n-----------------------------------\n",
                       end="")
        return mean_return

    def load_actor_weights(self, path):
        """Load the actor (and target actor) from a saved Keras model."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.target_actor = tf.keras.models.load_model(path)
            # Recompile to attach a fresh optimizer with the configured LR.
            self.target_actor.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self._actor_lr))
            self.actor_model = tf.keras.models.load_model(path)
            self.actor_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self._actor_lr))
            self.best_actor_weights = self.actor_model.get_weights()

    def load_critic_weights(self, path):
        """Load the critic (and target critic) from a saved Keras model."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            self.target_critic = tf.keras.models.load_model(path)
            self.target_critic.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self._critic_lr))
            self.critic_model = tf.keras.models.load_model(path)
            self.critic_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self._critic_lr))
            self.best_critic_weights = self.critic_model.get_weights()

    def save_actor_weights(self, path):
        """Persist the target actor to ``path`` (Keras save format)."""
        self.target_actor.save(path)

    def save_critic_weights(self, path):
        """Persist the target critic to ``path`` (Keras save format)."""
        self.target_critic.save(path)

    def reset(self):
        """Clear the buffer and all training/evaluation bookkeeping."""
        self.buffer.clear()
        self.crt_grad_norm_list = [0]
        self.act_grad_norm_list = [0]
        self.actor_loss_list = [0]
        self.critic_loss_list = [0]
        self.best_avg_reward = -np.inf
        self.eval_episodes = None
        self.hist = {'max_returns': [], 'min_returns': [], 'mean_returns': [], 'std_returns': [], 'mean_lens': []}
        self.clip_grad = None
        self.grad_norm = None

if __name__ == "__main__":

    from fmu_func import simulator
    import time

    # Hoisted out of the training loop (was re-imported every iteration).
    import matplotlib.pyplot as plt

    # Probe the simulator once with a zero action to discover the state shape.
    num_actions = 5
    num_states = simulator([0 for _ in range(num_actions)]).shape
    ACTOR_LR = 1e-4
    CRITIC_LR = 1e-4
    BATCH_SIZE = 128
    BUFFER_CAPACITY = BATCH_SIZE * 256
    SIGMA = 1e-5
    TAU = 1e-4
    N_TRAINING_STEPS = 32
    WARM_UP_STEPS = 5
    CLIP_GRADIENTS = True
    LOG_FREQ = 2
    EVAL_EPISODES = 10
    LEARN_FREQ = 5
    PERFORMANCE_TH = 1e8
    SAVE_BEST = True
    ENVIRONMENT = "dishWasher"

    ddpg = DDPG(num_states, num_actions,
                 actor_lr=ACTOR_LR, critic_lr=CRITIC_LR,
                 batch_size=BATCH_SIZE, buffer_capcaity=BUFFER_CAPACITY,
                 sigma=SIGMA, tau=TAU)

    # Resume from the previously saved models.
    ddpg.load_actor_weights(rf"{DIR_PATH}actor_ddpg-{ENVIRONMENT}.h5")
    ddpg.load_critic_weights(rf"{DIR_PATH}critic_ddpg-{ENVIRONMENT}.h5")

    for _ in range(10):
        start = time.time()

        hist = ddpg.fit(int(N_TRAINING_STEPS) * 20,
                        max_steps_per_ep=100,
                        warm_up=WARM_UP_STEPS,
                        clip_grad=CLIP_GRADIENTS,
                        log_freq=LOG_FREQ,
                        eval_episodes=EVAL_EPISODES,
                        learn_freq=LEARN_FREQ,
                        performance_th=PERFORMANCE_TH,
                        keep_best=SAVE_BEST
                        )
        end = time.time()
        time_taken = end - start

        # Plot the evaluation history collected during fit().
        episodes = LOG_FREQ * (np.arange(len(hist['mean_returns'])) + 1)
        plt.scatter(episodes, hist['mean_returns'])
        plt.plot(episodes, hist['mean_returns'])
        plt.fill_between(episodes, hist['min_returns'], hist['max_returns'],
                         facecolor="red", alpha=0.2)
        # change x and y labels
        plt.xlabel('Episodes')
        plt.ylabel('Mean reward')

        ret = ddpg.evaluate(2 * EVAL_EPISODES)
        # BUGFIX: evaluate() returns (mean, max, min, std, mean_len) — the
        # std deviation is ret[3]; ret[1] (used before) is the MAX return.
        print("Evaluation:", {'mean_reward': ret[0], 'std_reward': ret[3],
                   'time_taken_to_train_s': time_taken})

        ddpg.save_actor_weights(rf"{DIR_PATH}actor_ddpg-{ENVIRONMENT}.h5")
        ddpg.save_critic_weights(rf"{DIR_PATH}critic_ddpg-{ENVIRONMENT}.h5")

        plt.savefig("actual_result.png")
        plt.close()