import time

import gymnasium as gym
import numpy as np
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F

from agents.base_agent import BaseAgent
from network.ppo_discrete_net import Actor, Critic
from utilities.normalization import Normalization
from utilities.onpolicy_replay_buffer import OnPolicyReplayBuffer


class PPODiscrete(BaseAgent):
    """A PPO agent for discrete task
    1. GAE
    2. mini-batch update
    3. state normalization
    4. advantage normalization
    5. policy entropy loss
    6. Adam lr linear decay
    7. L2 grad norm
    8. orthogonal init
    9. tanh
    """
    agent_name = 'PPODiscrete'

    def __init__(self, config):
        """Build networks, optimizers, rollout buffer and (optionally) the
        parallel-sampling machinery.

        Args:
            config (dict): hyper-parameter dictionary; keys read below.
        """
        super().__init__(config)
        # Rollout / optimization hyper-parameters.
        self.batch_size = self.config['batch_size']          # transitions per PPO update
        self.mini_batch_size = self.config['mini_batch_size']
        self.epochs = self.config['epochs']                  # SGD passes over each rollout
        self.gamma = self.config['gamma']                    # discount factor
        self.lamda = self.config['lamda']                    # GAE lambda
        self.clip_epsilon = self.config['clip_epsilon']      # PPO surrogate clip range
        self.grad_clip = self.config['grad_clip']
        self.entropy_coef = self.config['entropy_coef']
        self.use_state_norm = self.config['use_state_norm']
        self.use_adv_norm = self.config['use_adv_norm']
        self.use_lr_decay = self.config['use_lr_decay']
        self.actor_lr = self.config['actor_lr']
        self.critic_lr = self.config['critic_lr']
        self.net_hyper = {'hidden_dim': self.config['hidden_dim'], 'use_tanh': self.config['use_tanh'],
                          'use_orthogonal_init': self.config['use_orthogonal_init']}
        self.actor = Actor(self.state_size, self.action_size, self.net_hyper).to(self.device)
        self.critic = Critic(self.state_size, self.net_hyper).to(self.device)
        # eps=1e-5 is the customary PPO choice for Adam numerical stability.
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr, eps=1e-5)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, eps=1e-5)
        self.replay_buffer = OnPolicyReplayBuffer(self.device)
        if self.use_state_norm:
            self.state_norm = Normalization(shape=self.state_size)
        else:
            self.state_norm = None
        # Keep a couple of cores free for the main/OS processes.
        self.num_workers = min(config['num_workers'], mp.cpu_count() - 2)
        self.use_parallel = self.num_workers > 1
        if self.use_parallel:
            # Per-worker handshake events plus a shared stop flag.
            self.start_events = [mp.Event() for _ in range(self.num_workers)]
            self.done_events = [mp.Event() for _ in range(self.num_workers)]
            self.stop_event = mp.Event()
            manager = mp.Manager()
            self.results = manager.list([None] * self.num_workers)  # rollouts published by workers
            self.params = manager.list([None] * self.num_workers)   # fresh actor weights for workers
            self.workers = []

    def run_single(self):
        """Single-process training loop: interact, store transitions, and run a
        PPO update every ``batch_size`` environment steps."""
        episode_step = 0
        episode_reward = 0
        # Seed only the very first reset; re-seeding on every episode would
        # replay the identical initial-state RNG stream each time.
        state, _ = self.env.reset(seed=self.seed)
        if self.use_state_norm:
            state = self.state_norm(state)
        while self.total_step < self.max_steps:
            action = self.select_action(state)
            next_state, reward, terminated, truncated, _ = self.env.step(action)
            done = terminated or truncated
            if self.use_state_norm:
                next_state = self.state_norm(next_state)
            # mask carries gamma for the TD bootstrap; 0 cuts the bootstrap on
            # true termination but NOT on time-limit truncation.
            mask = 0 if terminated else self.gamma
            self.replay_buffer.store(state, action, reward, next_state, mask, done)
            state = next_state
            self.total_step += 1
            episode_step += 1
            episode_reward += reward
            if done:
                self.episode += 1
                self.print_results(episode_step, episode_reward)
                episode_step = 0
                episode_reward = 0
                state, _ = self.env.reset()
                if self.use_state_norm:
                    state = self.state_norm(state)
            # `>=` is robust even if the buffer ever overshoots the rollout size.
            if len(self.replay_buffer) >= self.batch_size:
                self.train()
                self.replay_buffer.reset()
            if self.total_step % self.eval_freq == 0:
                self.evaluations.append(self.eval_model(self.actor, self.state_norm))
                print(f"Evaluation at {self.total_step} steps: {self.evaluations[-1]:.2f}")
                self.save_model(self.actor, self.state_norm)

    def select_action(self, state):
        """Sample a stochastic action from the current policy (for exploration).

        Returns:
            int: the sampled discrete action.
        """
        # Acting happens on CPU; train() moves the actor back to self.device.
        self.actor.to('cpu')
        state = torch.tensor(state).unsqueeze(0).float()
        with torch.no_grad():  # no autograd graph needed while sampling rollouts
            dist = self.actor.get_dist(state)
            action = dist.sample()
        return action.item()

    def evaluate(self, state):
        """Pick the greedy (argmax-probability) action for evaluation runs."""
        state = torch.tensor(state).unsqueeze(0).float()
        with torch.no_grad():
            prob = self.actor(state).detach().numpy().flatten()
        action = np.argmax(prob)
        return action

    def train(self):
        """Run one PPO optimization phase (``epochs`` x shuffled mini-batches)
        on the transitions currently held in the replay buffer."""
        self.actor.to(self.device)
        states, actions, rewards, next_states, masks, dones = self.replay_buffer.sample()
        values = self.critic(states).detach()
        next_values = self.critic(next_states).detach()
        # masks already contain gamma (or 0 at terminal states), see run_single.
        td_errors = rewards + masks * next_values - values
        advantages, returns = self.compute_advantage(values, td_errors, dones)
        dist = self.actor.get_dist(states)
        old_log_probs = dist.log_prob(actions.squeeze(1)).view(-1, 1).detach()
        # Ceiling division so the trailing partial mini-batch is not dropped.
        iter_num = (states.shape[0] + self.mini_batch_size - 1) // self.mini_batch_size
        for _ in range(self.epochs):
            # Fresh shuffle every epoch to decorrelate mini-batches.
            perm = np.arange(states.shape[0])
            np.random.shuffle(perm)
            perm = torch.LongTensor(perm).to(self.device)
            states = states[perm].clone()
            actions = actions[perm].clone()
            advantages = advantages[perm].clone()
            returns = returns[perm].clone()
            old_log_probs = old_log_probs[perm].clone()
            for i in range(iter_num):
                ind = slice(i * self.mini_batch_size, min((i + 1) * self.mini_batch_size, states.shape[0]))
                states_b = states[ind]
                actions_b = actions[ind]
                advantages_b = advantages[ind]
                returns_b = returns[ind]
                old_log_probs_b = old_log_probs[ind]
                dist = self.actor.get_dist(states_b)
                log_probs_b = dist.log_prob(actions_b.squeeze(1)).view(-1, 1)
                # PPO clipped surrogate objective plus entropy bonus.
                ratio = torch.exp(log_probs_b - old_log_probs_b)
                surr1 = ratio * advantages_b
                surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages_b
                entropy = dist.entropy().view(-1, 1)
                actor_loss = (-torch.min(surr1, surr2) - self.entropy_coef * entropy).mean()
                self.take_optimization(self.actor_optimizer, actor_loss, self.grad_clip, self.actor)
                critic_loss = F.mse_loss(self.critic(states_b), returns_b)
                self.take_optimization(self.critic_optimizer, critic_loss, self.grad_clip, self.critic)
                self.train_step += 1
        if self.use_lr_decay:
            self.lr_decay()

    def compute_advantage(self, values, td_errors, dones):
        """Compute GAE advantages and value targets from one-step TD errors.

        Args:
            values: critic values for the rollout states, (N, 1) tensor.
            td_errors: one-step TD residuals, (N, 1) tensor.
            dones: episode-end flags (termination OR truncation), (N, 1) tensor.

        Returns:
            tuple: (advantages, returns), both (N, 1) tensors on self.device;
            advantages are standardized when ``use_adv_norm`` is set.
        """
        advantages = []
        advantage = 0
        td_errors = td_errors.cpu().numpy().flatten()
        dones = dones.cpu().numpy().flatten()
        # Backward recursion A_t = delta_t + gamma*lambda*A_{t+1}, reset at episode ends.
        for delta, done in zip(reversed(td_errors), reversed(dones)):
            advantage = delta + self.gamma * self.lamda * advantage * (1 - done)
            advantages.append(advantage)
        advantages.reverse()
        advantages = torch.tensor(np.array(advantages)).float().view(-1, 1).to(self.device)
        returns = advantages + values
        if self.use_adv_norm:
            # Epsilon guards against division by zero when all advantages are equal.
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        return advantages, returns

    def lr_decay(self):
        """Linearly anneal both learning rates to 0 over ``max_steps``."""
        actor_lr_now = self.actor_lr * (1 - self.total_step / self.max_steps)
        critic_lr_now = self.critic_lr * (1 - self.total_step / self.max_steps)
        for p in self.actor_optimizer.param_groups:
            p['lr'] = actor_lr_now
        for p in self.critic_optimizer.param_groups:
            p['lr'] = critic_lr_now

    ########### parallel ############
    def run(self):
        """Entry point: dispatch to the parallel or single-process loop and
        return the list of evaluation scores."""
        if self.use_parallel:
            self.run_parallel()
        else:
            self.run_single()
        return self.evaluations

    def run_parallel(self):
        """Main-process side of parallel training: trigger workers, gather
        their rollouts, train, then broadcast fresh actor weights."""
        self.start()
        try:
            while self.total_step < self.max_steps:
                # Kick off one sampling round on every worker, then wait.
                for event in self.start_events:
                    event.set()
                for event in self.done_events:
                    event.wait()
                for event in self.done_events:
                    event.clear()

                for i in range(self.num_workers):
                    data = self.results[i]
                    if data is None or len(data['states']) == 0:
                        print(f"Warning: worker {i} return empty data")
                        continue
                    for j in range(len(data['states'])):
                        sample = (data['states'][j], data['actions'][j], data['rewards'][j], data['next_states'][j],
                                  data['masks'][j], data['dones'][j])
                        self.replay_buffer.store(*sample)
                        self.total_step += 1
                        if self.total_step % self.eval_freq == 0:
                            # Pass state_norm for consistency with run_single.
                            # NOTE(review): in parallel mode the main-process
                            # normalizer statistics are never updated by the
                            # workers — confirm this is acceptable.
                            self.evaluations.append(self.eval_model(self.actor, self.state_norm))
                            self.save_model(self.actor, self.state_norm)
                            print(f"Evaluation at {self.total_step} steps: {self.evaluations[-1]:.2f}")
                        if self.total_step >= self.max_steps:
                            break
                    self.results[i] = None

                self.train()
                self.replay_buffer.reset()
                # Publish the updated (CPU) weights for every worker to load.
                for worker_i in range(self.num_workers):
                    self.params[worker_i] = self.actor.to('cpu').state_dict()
        except KeyboardInterrupt:
            print("Training abort......")
        finally:
            self.stop()

    @staticmethod
    def worker_process(worker_id, env_name, seed, state_dim, action_dim, net_hyper, gamma, sample_steps, start_event,
                       done_event, stop_event, results, params, state_norm):
        """Sampling worker: on each ``start_event``, roll out ``sample_steps``
        transitions with the latest actor weights and publish them via
        ``results``; repeat until ``stop_event`` is set.

        NOTE(review): ``state_norm`` is copied into the subprocess at start-up,
        so its running statistics never flow back to the main process — verify
        this is acceptable when ``use_state_norm`` is enabled.
        """
        env = gym.make(env_name)
        local_model = Actor(state_dim, action_dim, net_hyper)
        episode = 0
        max_reward = float('-inf')
        # Seed the env RNG exactly once; re-seeding on every reset would make
        # all episodes replay the identical randomness.
        env.reset(seed=seed)

        while not stop_event.is_set():
            start_event.wait()
            start_event.clear()

            # Load the weights the main process published after its last update.
            if params[worker_id] is not None:
                local_model.load_state_dict(params[worker_id])
                params[worker_id] = None

            episode_step = 0
            episode_reward = 0
            states, actions, rewards, next_states, masks, dones = [], [], [], [], [], []
            state, _ = env.reset()
            if state_norm is not None:
                state = state_norm(state)
            for _ in range(sample_steps):
                state_t = torch.tensor(state).unsqueeze(0).float()
                with torch.no_grad():  # sampling only — no autograd graph needed
                    probs = local_model(state_t)
                action_dist = torch.distributions.Categorical(probs)
                action = action_dist.sample().item()
                next_state, reward, terminated, truncated, _ = env.step(action)
                if state_norm is not None:
                    next_state = state_norm(next_state)
                done = terminated or truncated
                # Same mask convention as run_single: gamma, or 0 on termination.
                mask = 0 if terminated else gamma

                states.append(state)
                actions.append(action)
                rewards.append(reward)
                next_states.append(next_state)
                masks.append(mask)
                dones.append(done)

                state = next_state
                episode_reward += reward
                episode_step += 1

                if done:
                    if episode_reward > max_reward:
                        max_reward = episode_reward
                    episode += 1
                    print(f"Worker:{worker_id} Episode:{episode} Episode_T:{episode_step} Score:{episode_reward:.2f} "
                          f"Max_score:{max_reward:.2f}")
                    episode_step = 0
                    episode_reward = 0
                    state, _ = env.reset()
                    if state_norm is not None:
                        state = state_norm(state)

            data = {'states': states,
                    'actions': actions,
                    'rewards': rewards,
                    'next_states': next_states,
                    'masks': masks,
                    'dones': dones}
            results[worker_id] = data

            done_event.set()

    def start(self):
        """Spawn one daemon sampling process per worker."""
        print(f"Start {self.num_workers} sampling workers......")
        # Split the rollout budget evenly across workers.
        sample_steps = int(self.batch_size / self.num_workers)
        for i in range(self.num_workers):
            p = mp.Process(
                target=self.worker_process,
                args=(i, self.env_name, self.seed + i, self.state_size, self.action_size, self.net_hyper, self.gamma,
                      sample_steps, self.start_events[i], self.done_events[i], self.stop_event, self.results, self.params,
                      self.state_norm),
                daemon=True)
            p.start()
            self.workers.append(p)
        time.sleep(0.5)  # give subprocesses a moment to initialize
        print(f"All workers have been started")

    def stop(self):
        """Signal all workers to exit; force-terminate any that hang."""
        if not self.stop_event.is_set():
            print("Stop sampling workers......")
            self.stop_event.set()
            for i, p in enumerate(self.workers):
                if p.is_alive():
                    p.join(timeout=1.0)
                    if p.is_alive():
                        print(f"Worker {i} has no response, forced terminated")
                        p.terminate()
                        p.join(timeout=0.5)
        print("All workers have been stopped")



