import gym
import random
import imageio
import datetime
import numpy as np
from collections import deque
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Lambda, Concatenate
from tensorflow.keras.optimizers import Adam

from Prioritized_Replay import Memory

# Reference papers: https://arxiv.org/pdf/1802.06480.pdf, https://arxiv.org/pdf/1801.05757.pdf

tf.keras.backend.set_floatx('float64')


def actor(state_shape, action_dim, action_bound, action_shift, units=(400, 300)):
    """Build a deterministic policy network mapping state -> action.

    The tanh output in (-1, 1) is scaled by `action_bound` and, when the
    action range is not centered at zero, shifted by `action_shift`.

    Args:
        state_shape: shape tuple of the observation space.
        action_dim: number of action components.
        action_bound: half-width of the action range (scalar or per-dim array).
        action_shift: center of the action range (scalar or per-dim array).
        units: widths of the hidden ReLU layers.

    Returns:
        An uncompiled keras Model: state -> scaled (and shifted) action.
    """
    state = Input(shape=state_shape)
    x = Dense(units[0], name="L0", activation='relu')(state)
    for index in range(1, len(units)):
        x = Dense(units[index], name="L{}".format(index), activation='relu')(x)

    unscaled_output = Dense(action_dim, name="Out", activation='tanh')(x)
    scalar = action_bound * np.ones(action_dim)
    output = Lambda(lambda op: op * scalar)(unscaled_output)
    # BUG FIX: the previous test `np.sum(action_shift) != 0` skipped the shift
    # layer whenever the components cancel (e.g. shift [+1, -1] sums to zero).
    # np.any correctly detects any nonzero shift component.
    if np.any(action_shift):
        output = Lambda(lambda op: op + action_shift)(output)  # for action range not centered at zero

    model = Model(inputs=state, outputs=output)

    return model


def critic(state_shape, action_dim, units=(48, 24)):
    """Build a Q-network mapping a (state, action) pair to a scalar value.

    State and action inputs are concatenated, passed through ReLU hidden
    layers of the given widths, and reduced to a single linear output.
    """
    state_in = Input(shape=state_shape)
    action_in = Input(shape=(action_dim,))
    hidden = Concatenate(axis=-1)([state_in, action_in])
    for i, width in enumerate(units):
        hidden = Dense(width, name="L{}".format(i), activation='relu')(hidden)
    q_value = Dense(1, name="Out")(hidden)
    return Model(inputs=[state_in, action_in], outputs=q_value)


def update_target_weights(model, target_model, tau=0.005):
    """Polyak-average the online model's weights into the target model.

    target <- tau * online + (1 - tau) * target. Calling with tau=1.0
    performs a hard copy of the online weights into the target.
    """
    blended = [online * tau + target * (1.0 - tau)
               for online, target in zip(model.get_weights(), target_model.get_weights())]
    target_model.set_weights(blended)


# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py
class OrnsteinUhlenbeckNoise:
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Each call advances the process one step:
        x <- x + theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)
    Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py
    """

    def __init__(self, mu, sigma=0.2, theta=.15, dt=1e-2, x0=None):
        self.mu = mu
        self.sigma = sigma
        self.theta = theta
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev

    def reset(self):
        # Restart the process at x0 when given, otherwise at the origin.
        self.x_prev = np.zeros_like(self.mu) if self.x0 is None else self.x0


class NormalNoise:
    """Uncorrelated Gaussian exploration noise.

    Each call draws a fresh i.i.d. sample of shape `mu.shape`; `mu` itself
    only supplies the output shape (the distribution is zero-centered).
    """

    def __init__(self, mu, sigma=0.15):
        self.mu = mu
        self.sigma = sigma

    def __call__(self):
        return np.random.normal(scale=self.sigma, size=self.mu.shape)

    def reset(self):
        # Stateless noise: nothing to reset. Exists for API parity with
        # OrnsteinUhlenbeckNoise so callers can reset either interchangeably.
        pass


class PD_DDPG:
    """Primal-Dual DDPG agent for constrained reinforcement learning.

    Maximizes discounted reward subject to a long-term cost constraint by
    maintaining separate reward and cost critics and a dual variable `lam`
    updated by gradient ascent on the constraint violation.
    Reference papers: https://arxiv.org/pdf/1802.06480.pdf,
    https://arxiv.org/pdf/1801.05757.pdf
    """

    def __init__(
            self,
            env,
            discrete=False,
            use_priority=False,
            lr_actor=1e-5,
            lr_critic=1e-3,
            lr_lam=1e-5,
            actor_units=(24, 16),
            critic_units=(24, 16),
            noise='norm',
            cost_const=1,
            sigma=0.15,
            tau=0.125,
            gamma=0.85,
            batch_size=64,
            memory_cap=100000
    ):
        """Set up actor/critic networks, replay memory, noise and hyperparameters.

        Args:
            env: gym environment to train on.
            discrete: True for discrete action spaces (argmax over actor output).
            use_priority: use prioritized experience replay instead of uniform.
            lr_actor: actor learning rate.
            lr_critic: learning rate shared by both critics.
            lr_lam: step size for the dual-variable (lambda) update.
            actor_units: hidden layer widths of the actor.
            critic_units: hidden layer widths of both critics.
            noise: 'ou' for Ornstein-Uhlenbeck noise, anything else for Gaussian.
            cost_const: constraint threshold d (long-term cost <= d).
            sigma: exploration noise scale.
            tau: Polyak averaging rate for target-network updates.
            gamma: discount factor.
            batch_size: replay minibatch size.
            memory_cap: replay memory capacity.
        """
        self.env = env
        self.state_shape = env.observation_space.shape  # shape of observations
        self.action_dim = env.action_space.n if discrete else env.action_space.shape[0]  # number of actions
        self.discrete = discrete
        # Continuous spaces: scale/shift so tanh output covers [low, high].
        self.action_bound = (env.action_space.high - env.action_space.low) / 2 if not discrete else 1.
        self.action_shift = (env.action_space.high + env.action_space.low) / 2 if not discrete else 0.
        self.use_priority = use_priority
        self.memory = Memory(capacity=memory_cap) if use_priority else deque(maxlen=memory_cap)
        if noise == 'ou':
            self.noise = OrnsteinUhlenbeckNoise(mu=np.zeros(self.action_dim), sigma=sigma)
        else:
            self.noise = NormalNoise(mu=np.zeros(self.action_dim), sigma=sigma)

        # Define and initialize Actor network
        self.actor = actor(self.state_shape, self.action_dim, self.action_bound, self.action_shift, actor_units)
        self.actor_target = actor(self.state_shape, self.action_dim, self.action_bound, self.action_shift, actor_units)
        self.actor_optimizer = Adam(learning_rate=lr_actor)
        update_target_weights(self.actor, self.actor_target, tau=1.)  # hard copy

        # Define and initialize Critic networks
        self.reward_critic = critic(self.state_shape, self.action_dim, critic_units)
        self.reward_critic_target = critic(self.state_shape, self.action_dim, critic_units)
        self.reward_critic_optimizer = Adam(learning_rate=lr_critic)
        self.reward_critic.compile(loss="mean_squared_error", optimizer=self.reward_critic_optimizer)
        update_target_weights(self.reward_critic, self.reward_critic_target, tau=1.)  # hard copy

        self.cost_critic = critic(self.state_shape, self.action_dim, critic_units)
        self.cost_critic_target = critic(self.state_shape, self.action_dim, critic_units)
        self.cost_critic_optimizer = Adam(learning_rate=lr_critic)
        self.cost_critic.compile(loss="mean_squared_error", optimizer=self.cost_critic_optimizer)
        update_target_weights(self.cost_critic, self.cost_critic_target, tau=1.)  # hard copy

        # Dual variable (Lagrange multiplier), kept non-negative in replay()
        self.lam = tf.Variable(0.0, dtype=tf.float64)
        self.lr_lam = lr_lam

        # Set hyperparameters
        self.gamma = gamma  # discount factor
        self.tau = tau  # target model update
        self.batch_size = batch_size
        self.cost_constraint = cost_const  # long term cost <= d
        if use_priority:
            # Mixing weight between TD error and actor-gradient magnitude in
            # the priority, eq (8) of https://arxiv.org/pdf/1801.05757.pdf
            self.phi = 0.6

        # Tensorboard
        self.summaries = {}

    def act(self, state, add_noise=True):
        """Return the (batched, clipped) action for `state`.

        Also records the current reward/cost Q-value estimates for the chosen
        action into self.summaries for tensorboard logging.
        """
        state = np.expand_dims(state, axis=0).astype(np.float32)
        a = self.actor.predict(state)
        # `add_noise` multiplies as 0/1, so noise is disabled at evaluation.
        a += self.noise() * add_noise * self.action_bound
        a = tf.clip_by_value(a, -self.action_bound + self.action_shift, self.action_bound + self.action_shift)

        self.summaries['reward_q_val'] = self.reward_critic.predict([state, a])[0][0]
        self.summaries['cost_q_val'] = self.cost_critic.predict([state, a])[0][0]

        return a

    def save_model(self, a_fn, reward_c_fn, cost_c_fn):
        """Save actor, reward critic and cost critic to the given filenames."""
        self.actor.save(a_fn)
        self.reward_critic.save(reward_c_fn)
        self.cost_critic.save(cost_c_fn)

    def load_actor(self, a_fn):
        """Load actor weights into both the online and target actor."""
        self.actor.load_weights(a_fn)
        self.actor_target.load_weights(a_fn)
        print(self.actor.summary())

    def load_critic(self, reward_c_fn, cost_c_fn):
        """Load critic weights into the online and target reward/cost critics."""
        self.reward_critic.load_weights(reward_c_fn)
        self.reward_critic_target.load_weights(reward_c_fn)
        self.cost_critic.load_weights(cost_c_fn)
        self.cost_critic_target.load_weights(cost_c_fn)
        print(self.reward_critic.summary())

    def remember(self, state, action, reward, cost, next_state, done):
        """Store one transition in replay memory.

        Prioritized memory stores a flat vector; the plain deque stores the
        components as a list with batched (1, ...) state arrays.
        """
        if self.use_priority:
            action = np.squeeze(action)
            transition = np.hstack([state, action, reward, cost, next_state, done])
            self.memory.store(transition)
        else:
            state = np.expand_dims(state, axis=0)
            next_state = np.expand_dims(next_state, axis=0)
            self.memory.append([state, action, reward, cost, next_state, done])

    def replay(self):
        """Sample a minibatch and update critics, actor, priorities and lambda."""
        if len(self.memory) < self.batch_size:
            return

        if self.use_priority:
            tree_idx, samples, ISWeights = self.memory.sample(self.batch_size)
            # Transitions were stored flat; split back into components.
            split_shape = np.cumsum([self.state_shape[0], self.action_dim, 1, 1, self.state_shape[0]])
            states, actions, rewards, costs, next_states, dones = np.hsplit(samples, split_shape)
        else:
            ISWeights = 1.0  # uniform sampling -> no importance-sampling correction
            samples = random.sample(self.memory, self.batch_size)
            s = np.array(samples).T
            # BUG FIX: `np.float` was removed in NumPy 1.24; use the builtin float.
            states, actions, rewards, costs, next_states, dones = [np.vstack(s[i, :]).astype(float) for i in range(6)]

        # Bootstrapped TD targets from the target networks; (1 - done) masks
        # out the future value at terminal transitions.
        next_actions = self.actor_target.predict(next_states)
        reward_q_future = self.reward_critic_target.predict([next_states, next_actions])
        reward_target_qs = rewards + reward_q_future * self.gamma * (1. - dones)

        cost_q_future = self.cost_critic_target.predict([next_states, next_actions])
        cost_target_qs = costs + cost_q_future * self.gamma * (1. - dones)

        # train critic
        with tf.GradientTape() as tape:
            q_values = self.reward_critic([states, actions])
            reward_td_error = q_values - reward_target_qs
            reward_critic_loss = tf.reduce_mean(ISWeights * tf.math.square(reward_td_error))

        reward_critic_grad = tape.gradient(reward_critic_loss, self.reward_critic.trainable_variables)
        self.reward_critic_optimizer.apply_gradients(zip(reward_critic_grad, self.reward_critic.trainable_variables))

        with tf.GradientTape() as tape:
            q_values = self.cost_critic([states, actions])
            cost_td_error = q_values - cost_target_qs
            cost_critic_loss = tf.reduce_mean(ISWeights * tf.math.square(cost_td_error))

        cost_critic_grad = tape.gradient(cost_critic_loss, self.cost_critic.trainable_variables)
        self.cost_critic_optimizer.apply_gradients(zip(cost_critic_grad, self.cost_critic.trainable_variables))

        # train actor on the Lagrangian: maximize reward Q minus lam * cost Q.
        # Persistent tape: we also need d(actor_loss)/d(actions) for priorities.
        with tf.GradientTape(persistent=True) as tape:
            actions = self.actor(states)
            cost_q = self.cost_critic([states, actions])
            reward_q = self.reward_critic([states, actions])
            actor_loss = -tf.reduce_mean(reward_q - self.lam * cost_q)

        actor_grad = tape.gradient(actor_loss, self.actor.trainable_variables)  # compute actor gradient
        self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))

        # update priority based on eq (8) in https://arxiv.org/pdf/1801.05757.pdf
        if self.use_priority:
            abs_errors = tf.reduce_sum(tf.abs(reward_td_error) + tf.abs(cost_td_error), axis=1)
            action_grad = tape.gradient(actor_loss, actions)
            mean_abs_grad = tf.reduce_mean(tf.abs(action_grad), axis=1)
            priority = self.phi * abs_errors + (1 - self.phi) * mean_abs_grad
            self.memory.batch_update(tree_idx, priority)

        # update dual variable: ascend on constraint violation, project to >= 0
        lam_update = tf.reduce_mean(cost_q - self.cost_constraint)
        self.lam.assign_add(self.lr_lam * lam_update)
        self.lam.assign(tf.maximum(self.lam, 0.0))

        # tensorboard info
        self.summaries['reward_critic_loss'] = reward_critic_loss
        self.summaries['cost_critic_loss'] = cost_critic_loss
        self.summaries['actor_loss'] = actor_loss
        self.summaries['lam_update'] = lam_update

    def train(self, max_episodes=50, max_epochs=8000, max_steps=500, save_freq=50):
        """Run the training loop, logging to tensorboard and checkpointing.

        Stops only once BOTH max_episodes and max_epochs are exceeded.
        The per-step cost is defined for CartPole as |cart x-position|.
        """
        current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        train_log_dir = 'logs/Primal_Dual_DDPG_' + current_time
        summary_writer = tf.summary.create_file_writer(train_log_dir)

        done, episode, steps, epoch, total_reward = False, 0, 0, 0, 0
        cur_state = self.env.reset()
        while episode < max_episodes or epoch < max_epochs:
            if done:
                episode += 1
                print("episode {}: {} total reward, {} steps, {} epochs".format(
                    episode, total_reward, steps, epoch))

                with summary_writer.as_default():
                    tf.summary.scalar('Main/episode_reward', total_reward, step=episode)
                    tf.summary.scalar('Main/episode_steps', steps, step=episode)

                summary_writer.flush()
                self.noise.reset()

                if steps >= max_steps:
                    print("episode {}, reached max steps".format(episode))
                    self.save_model("ddpg_actor_episode{}.h5".format(episode),
                                    "ddpg_reward_critic_episode{}.h5".format(episode),
                                    "ddpg_cost_critic_episode{}.h5".format(episode),)

                done, cur_state, steps, total_reward = False, self.env.reset(), 0, 0
                if episode % save_freq == 0:
                    self.save_model("ddpg_actor_episode{}.h5".format(episode),
                                    "ddpg_reward_critic_episode{}.h5".format(episode),
                                    "ddpg_cost_critic_episode{}.h5".format(episode),)

            a = self.act(cur_state)  # model determine action given state
            action = np.argmax(a) if self.discrete else a[0]  # post process for discrete action space
            next_state, reward, done, _ = self.env.step(action)  # perform action on env
            self.env.render()

            # Define cost for CartPole based on x pos, x pos values range from (-2.4, 2.4)
            cost = abs(next_state[0])

            self.remember(cur_state, a, reward, cost, next_state, done)  # add to memory
            self.replay()  # train models through memory replay

            update_target_weights(self.actor, self.actor_target, tau=self.tau)  # iterates target model
            update_target_weights(self.reward_critic, self.reward_critic_target, tau=self.tau)
            update_target_weights(self.cost_critic, self.cost_critic_target, tau=self.tau)

            cur_state = next_state
            total_reward += reward
            steps += 1
            epoch += 1

            # Tensorboard update
            with summary_writer.as_default():
                # Loss summaries only exist once replay() has started training.
                if len(self.memory) > self.batch_size:
                    tf.summary.scalar('Loss/actor_loss', self.summaries['actor_loss'], step=epoch)
                    tf.summary.scalar('Loss/reward_critic_loss', self.summaries['reward_critic_loss'], step=epoch)
                    tf.summary.scalar('Loss/cost_critic_loss', self.summaries['cost_critic_loss'], step=epoch)
                    tf.summary.scalar('Loss/lam_update', self.summaries['lam_update'], step=epoch)
                tf.summary.scalar('Main/step_reward', reward, step=epoch)
                tf.summary.scalar('Main/step_cost', cost, step=epoch)
                tf.summary.scalar('Stats/reward_q_val', self.summaries['reward_q_val'], step=epoch)
                tf.summary.scalar('Stats/cost_q_val', self.summaries['cost_q_val'], step=epoch)
                tf.summary.scalar('Stats/lambda', self.lam.numpy(), step=epoch)

            summary_writer.flush()

        self.save_model("ddpg_actor_final_episode{}.h5".format(episode),
                        "ddpg_reward_critic_final_episode{}.h5".format(episode),
                        "ddpg_cost_critic_final_episode{}.h5".format(episode),)

    def test(self, render=True, fps=30, filename='test_render.mp4'):
        """Run one noise-free episode, optionally recording video; return total reward."""
        cur_state, done, rewards = self.env.reset(), False, 0
        video = imageio.get_writer(filename, fps=fps)
        while not done:
            a = self.act(cur_state, add_noise=False)
            action = np.argmax(a) if self.discrete else a[0]  # post process for discrete action space
            next_state, reward, done, _ = self.env.step(action)
            cur_state = next_state
            rewards += reward
            if render:
                video.append_data(self.env.render(mode='rgb_array'))
        video.close()
        return rewards


if __name__ == "__main__":
    gym_env = gym.make('CartPole-v0')
    try:
        # Ensure action bound is symmetric. A Discrete space has no `high`
        # attribute, so the lookup raises AttributeError and we fall through.
        # BUG FIX: wrap the element-wise comparison in np.all — for a Box
        # space with more than one dimension, `high == -low` is an array and
        # using it directly as a truth value raises ValueError (not caught).
        assert np.all(gym_env.action_space.high == -gym_env.action_space.low)
        is_discrete = False
        print('Continuous Action Space')
    except AttributeError:
        is_discrete = True
        print('Discrete Action Space')

    ddpg = PD_DDPG(gym_env, discrete=is_discrete)
    # ddpg.load_actor("ddpg_actor_episode124.h5")
    ddpg.train(max_episodes=1000)
    # rewards = ddpg.test()
    # print("Total rewards: ", rewards)
