
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Type, TypeVar, Union
from copy import deepcopy
import numpy as np

import torch as th
import torch.nn as nn
import torch.nn.functional as F

from stable_baselines3 import SAC,HerReplayBuffer
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
# from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit
from stable_baselines3.common.preprocessing import get_flattened_obs_dim
from stable_baselines3.sac.policies import SACPolicy
from stable_baselines3.common.utils import get_parameters_by_name, polyak_update

from stable_baselines3.common.vec_env import (
    DummyVecEnv,
    unwrap_vec_normalize,
)
from stable_baselines3.common.type_aliases import DictReplayBufferSamples

from gymnasium import spaces

from .encoder import MLPEncoder
from .replay_buffer import MultiTaskDictReplayBuffer,MultiTaskHerReplayBuffer

# Type variable for fluent, self-returning methods (e.g. ``learn``), bound to PEARL_SAC.
SelfSAC = TypeVar("SelfSAC", bound="PEARL_SAC")

class PEARL_SAC(SAC):
    """
    PEARL-style meta-RL built on top of Stable-Baselines3's SAC.

    A context encoder infers a latent task variable ``z`` from recent
    (observation, action, reward) transitions; the actor and critics are
    conditioned on ``z`` by receiving it as an extra observation key.

    NOTE: the environment must include ``task_z`` in its ``observation_space``.
    """
    def __init__(self,encoder_lr=1e-4,latent_dim=5,encoder_hidden_dim=300,kl_lambda=0.1,**args):
        """
        :param encoder_lr: learning rate for the context-encoder optimizer
        :param latent_dim: dimensionality of the latent task variable ``z``
        :param encoder_hidden_dim: hidden-layer width of the MLP encoder
        :param kl_lambda: weight of the KL-divergence term in the encoder loss
        :param args: remaining keyword arguments forwarded verbatim to ``SAC.__init__``
        """
        self.encoder_lr=encoder_lr
        self.kl_lambda = kl_lambda
        self.latent_dim = latent_dim
        self.encoder_hidden_dim = encoder_hidden_dim
        # False while training; set to True by reset_test_env() for evaluation,
        # which changes how _sample_action behaves during warm-up.
        self.eval_env = False
        super().__init__(**args)


    def _setup_model(self,):
        """Build the standard SAC components, then the context encoder, its optimizer, and per-env buffer sampling."""
        super()._setup_model()
        # encoder_input_dim = observation_dim + action_dim + reward_dim(1)
        encoder_input_dim = 0
        for key in self.observation_space:
            # 'task_z' is produced by the encoder itself, so it is excluded
            # from the encoder's input context.
            if key == 'task_z': continue
            encoder_input_dim += get_flattened_obs_dim(self.observation_space[key])
        encoder_input_dim += self.action_space.shape[0]
        encoder_input_dim += 1

        # The encoder emits 2 * latent_dim values (distribution parameters for q(z|c)).
        encoder_output_dim = 2 * self.latent_dim

        self.encoder = MLPEncoder(
            input_dim=encoder_input_dim,
            output_dim=encoder_output_dim,
            latent_dim=self.latent_dim,
            hidden_dim=self.encoder_hidden_dim,
            num_tasks=self.n_envs,
            device=self.device,
        ).to(self.device)
        self.encoder_optimizer = th.optim.Adam(self.encoder.parameters(), lr=self.encoder_lr)

        # One task per parallel env: the meta-batch spans all envs.
        self.meta_batch_size = self.n_envs
        env_ids = [env_id for env_id in range(self.n_envs)]
        self.replay_buffer.init_env_sample(env_ids)

    def train(
        self,
        meta_batch_size: int,
        batch_size: int,
        gradient_steps: int
    ) -> None:
        """
        Run ``gradient_steps`` joint updates of the encoder, critic, actor and
        (optionally) the entropy coefficient, sampling ``batch_size``
        transitions for each of ``meta_batch_size`` tasks per step.

        :param meta_batch_size: number of tasks (envs) sampled per gradient step
        :param batch_size: transitions sampled per task
        :param gradient_steps: number of update iterations to perform
        """
        # Switch to train mode (this affects batch norm / dropout)
        self.policy.set_training_mode(True)
        # Update optimizers learning rate
        optimizers = [self.actor.optimizer, self.critic.optimizer]
        if self.ent_coef_optimizer is not None:
            optimizers += [self.ent_coef_optimizer]

        # Update learning rate according to lr schedule
        self._update_learning_rate(optimizers)

        ent_coef_losses, ent_coefs = [], []
        actor_losses, critic_losses = [], []
        encoder_losses = []

        for gradient_step in range(gradient_steps):
            # Sample replay buffer
            # meta_batch_size matches the env_ids used when initializing the
            # buffer, which keeps the two consistent.
            replay_data = self.replay_buffer.sample(meta_batch_size, batch_size, env=self._vec_normalize_env)  # type: ignore[union-attr]

            # We need to sample because `log_std` may have changed between two gradient steps
            if self.use_sde:
                self.actor.reset_noise()

            # update encoder
            # context = concatenated (state, action, reward) sequences

            context_batch = replay_data.context
            _meta_batch_size,_batch_size,eps_len, feature_num = context_batch.shape
            # NOTE(review): uses the `batch_size` argument here but `_batch_size`
            # (from the sampled shape) below — equivalent only if sample()
            # honors batch_size exactly; confirm against the buffer.
            context_batch = context_batch.view(_meta_batch_size * batch_size, eps_len, feature_num)

            # Infer the posterior distribution z ~ q(z|c).
            self.encoder.infer_posterior(context_batch.float())
            task_z = self.encoder.task_z

            feature_num = task_z.shape[-1]

            task_z = task_z.view(_meta_batch_size* _batch_size, feature_num)

            # Encoder loss: KL divergence between the posterior and the prior.
            kl_div = self.encoder.compute_kl_div()
            encoder_loss = self.kl_lambda * kl_div
            self.encoder_optimizer.zero_grad()
            # retain_graph so the critic loss (which also flows through task_z)
            # can backprop through the same graph later in this step.
            encoder_loss.backward(retain_graph=True)
            encoder_losses.append(encoder_loss.item())

            # cat with task_z

            # The actor sees a detached task_z (so actor gradients do not reach
            # the encoder); the critic sees the attached task_z.
            observations_detach_task_z = {}
            for key in replay_data.observations:
                replay_data.observations[key] = replay_data.observations[key].view(meta_batch_size * batch_size, -1)
                observations_detach_task_z[key] = replay_data.observations[key]
            replay_data.observations['task_z'] = task_z
            observations_detach_task_z['task_z'] = task_z.detach()

            for key in replay_data.next_observations:
                replay_data.next_observations[key] = replay_data.next_observations[key].view(meta_batch_size * batch_size, -1)
            replay_data.next_observations['task_z'] = task_z

            # Flatten (meta_batch, batch) into a single batch dimension.
            replay_data = DictReplayBufferSamples(
                replay_data.observations,
                replay_data.actions.view(meta_batch_size * batch_size, -1),
                replay_data.next_observations,
                replay_data.dones.view(meta_batch_size * batch_size, -1),
                replay_data.rewards.view(meta_batch_size * batch_size, -1)
            )

            replay_data_detach_task_z = DictReplayBufferSamples(
                observations_detach_task_z,
                replay_data.actions.view(meta_batch_size * batch_size, -1),
                replay_data.next_observations,
                replay_data.dones.view(meta_batch_size * batch_size, -1),
                replay_data.rewards.view(meta_batch_size * batch_size, -1)
            )

            # Action by the current actor for the sampled state
            actions_pi, log_prob = self.actor.action_log_prob(replay_data_detach_task_z.observations)
            log_prob = log_prob.reshape(-1, 1)

            ent_coef_loss = None
            if self.ent_coef_optimizer is not None and self.log_ent_coef is not None:
                # Important: detach the variable from the graph
                # so we don't change it with other losses
                # see https://github.com/rail-berkeley/softlearning/issues/60
                ent_coef = th.exp(self.log_ent_coef.detach())
                ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
                ent_coef_losses.append(ent_coef_loss.item())
            else:
                ent_coef = self.ent_coef_tensor

            ent_coefs.append(ent_coef.item())

            # Optimize entropy coefficient, also called
            # entropy temperature or alpha in the paper
            if ent_coef_loss is not None and self.ent_coef_optimizer is not None:
                self.ent_coef_optimizer.zero_grad()
                ent_coef_loss.backward()
                self.ent_coef_optimizer.step()

            with th.no_grad():
                # Select action according to policy
                next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
                # Compute the next Q values: min over all critics targets
                next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
                next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
                # add entropy term
                next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
                # td error + entropy term
                target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values

            # Get current Q-values estimates for each critic network
            # using action from the replay buffer
            current_q_values = self.critic(replay_data.observations, replay_data.actions)

            # Compute critic loss
            critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values)
            assert isinstance(critic_loss, th.Tensor)  # for type checker
            critic_losses.append(critic_loss.item())  # type: ignore[union-attr]

            # Optimize the critic
            self.critic.optimizer.zero_grad()
            critic_loss.backward()
            self.critic.optimizer.step()

            # Apply the encoder gradients accumulated from encoder_loss above
            # and from critic_loss (which flows into the encoder via the
            # attached task_z in replay_data.observations).
            self.encoder_optimizer.step()

            # Compute actor loss
            # Alternative: actor_loss = th.mean(log_prob - qf1_pi)
            # Min over all critic networks
            # replay_data.observations['task_z'] = replay_data.observations['task_z'].detach()
            q_values_pi = th.cat(self.critic(replay_data_detach_task_z.observations, actions_pi), dim=1)
            min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
            actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
            actor_losses.append(actor_loss.item())

            # Optimize the actor
            self.actor.optimizer.zero_grad()
            actor_loss.backward()
            self.actor.optimizer.step()

            # Update target networks
            if gradient_step % self.target_update_interval == 0:
                polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
                # Copy running stats, see GH issue #996
                polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)

        self._n_updates += gradient_steps

        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
        self.logger.record("train/ent_coef", np.mean(ent_coefs))
        self.logger.record("train/actor_loss", np.mean(actor_losses))
        self.logger.record("train/critic_loss", np.mean(critic_losses))
        self.logger.record("train/encode_loss", np.mean(encoder_losses))
        if len(ent_coef_losses) > 0:
            self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))

    # Names excluded from the pickled save (networks are saved via their
    # torch state dicts instead; see _get_torch_save_params).
    def _excluded_save_params(self) -> List[str]:
        return super()._excluded_save_params() + ["actor", "critic", "critic_target","latent_dim","encoder_hidden_dim"]  # noqa: RUF005

    # Parameters saved with torch.save (state dicts + plain tensors).
    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer","encoder"]
        if self.ent_coef_optimizer is not None:
            saved_pytorch_variables = ["log_ent_coef"]
            state_dicts.append("ent_coef_optimizer")
        else:
            saved_pytorch_variables = ["ent_coef_tensor"]
        return state_dicts, saved_pytorch_variables

    def _store_transition(
        self,
        replay_buffer: ReplayBuffer,
        buffer_action: np.ndarray,
        new_obs: Union[np.ndarray, Dict[str, np.ndarray]],
        reward: np.ndarray,
        dones: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        """
        Store transition in the replay buffer.
        We store the normalized action and the unnormalized observation.
        It also handles terminal observations (because VecEnv resets automatically).

        After storing, it re-infers the task posterior from each env's latest
        context and writes ``task_z`` into ``self._last_obs`` so the next
        action selection is conditioned on the updated latent.

        :param replay_buffer: Replay buffer object where to store the transition.
        :param buffer_action: normalized action
        :param new_obs: next observation in the current episode
            or first observation of the episode (when dones is True)
        :param reward: reward for the current transition
        :param dones: Termination signal
        :param infos: List of additional information about the transition.
            It may contain the terminal observations and information about timeout.
        """
        # Store only the unnormalized version
        if self._vec_normalize_env is not None:
            new_obs_ = self._vec_normalize_env.get_original_obs()
            reward_ = self._vec_normalize_env.get_original_reward()
        else:
            # Avoid changing the original ones
            self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward

        # Avoid modification by reference
        next_obs = deepcopy(new_obs_)

        # As the VecEnv resets automatically, new_obs is already the
        # first observation of the next episode
        for i, done in enumerate(dones):
            if done and infos[i].get("terminal_observation") is not None:
                if isinstance(next_obs, dict):
                    next_obs_ = infos[i]["terminal_observation"]
                    # VecNormalize normalizes the terminal observation
                    if self._vec_normalize_env is not None:
                        next_obs_ = self._vec_normalize_env.unnormalize_obs(next_obs_)
                    # Replace next obs for the correct envs
                    for key in next_obs.keys():
                        next_obs[key][i] = next_obs_[key]

                else:
                    next_obs[i] = infos[i]["terminal_observation"]
                    # VecNormalize normalizes the terminal observation
                    if self._vec_normalize_env is not None:
                        next_obs[i] = self._vec_normalize_env.unnormalize_obs(next_obs[i, :])

        replay_buffer.add(
            self._last_original_obs,
            next_obs,
            buffer_action,
            reward_,
            dones,
            infos,
        )
        self._last_obs = new_obs
        # Rebuild each env's context from the buffer and re-infer z, then
        # expose it to the policy through the 'task_z' observation key.
        context_batch = []
        for env_id in range(self.n_envs):
            _context_batch = replay_buffer.get_context(replay_buffer.pos, env_id)
            context_batch.append(_context_batch)
        context_batch = np.stack(context_batch,axis=0)
        self.encoder.infer_posterior(replay_buffer.to_torch(context_batch).float())

        self._last_obs['task_z'] = self.encoder.task_z.cpu().detach().numpy()
        # Save the unnormalized observation
        if self._vec_normalize_env is not None:
            self._last_original_obs = new_obs_

    def update_context(self, obs: np.ndarray, action: np.ndarray, reward: np.ndarray) -> None:
        """Append one (obs, action, reward) transition to the encoder's running context."""
        obs = obs.reshape((self.n_envs, 1, obs.shape[-1]))
        action = action.reshape((self.n_envs, 1, action.shape[-1]))
        reward = reward.reshape((self.n_envs, 1, 1))

        obs = th.from_numpy(obs).float().to(self.device)
        action = th.from_numpy(action).float().to(self.device)
        reward = th.from_numpy(reward).float().to(self.device)
        transition = th.cat([obs, action, reward], dim=-1).to(self.device)

        if self.encoder.context is None:
            self.encoder.context = transition
        else:
            # Grow the context along the time dimension.
            self.encoder.context = th.cat([self.encoder.context,transition], dim=1).to(self.device)

    def learn(
        self: SelfSAC,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        tb_log_name: str = "PEARL_SAC",
        reset_num_timesteps: bool = True,
        progress_bar: bool = False,
    ) -> SelfSAC:
        """
        Standard off-policy learn loop, except that the latent task variable
        is reset to the prior (``encoder.clear_z()``) before every collection
        cycle, and ``train`` is called with the meta batch size.
        """
        total_timesteps, callback = self._setup_learn(
            total_timesteps,
            callback,
            reset_num_timesteps,
            tb_log_name,
            progress_bar,
        )

        callback.on_training_start(locals(), globals())

        while self.num_timesteps < total_timesteps:
            # Start each collection cycle from the prior over z.
            self.update_posterior = True
            self.encoder.clear_z()
            rollout = self.collect_rollouts(
                self.env,
                train_freq=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval,
            )

            if rollout.continue_training is False:
                break

            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                # If no `gradient_steps` is specified,
                # do as many gradients steps as steps performed during the rollout
                gradient_steps = self.gradient_steps if self.gradient_steps >= 0 else rollout.episode_timesteps
                # Special case when the user passes `gradient_steps=0`
                if gradient_steps > 0:
                    self.train(self.meta_batch_size,batch_size=self.batch_size, gradient_steps=gradient_steps)

        callback.on_training_end()

        return self

    # Swap in a new (evaluation) environment, rebuilding env-dependent state.
    def reset_test_env(self,env):
        """
        Replace the current environment with ``env`` for evaluation and switch
        this model into eval mode (see ``_sample_action``).
        """
        # reset env
        env = self._wrap_env(env, self.verbose, monitor_wrapper=True)

        self.observation_space = env.observation_space
        self.action_space = env.action_space
        self.n_envs = env.num_envs
        self.env = env

        # get VecNormalize object if needed
        self._vec_normalize_env = unwrap_vec_normalize(env)

        # HER buffers keep a reference to the env for goal relabeling.
        if issubclass(self.replay_buffer_class, HerReplayBuffer):
            self.replay_buffer.set_env(env)

        self.eval_env = True
        self.update_posterior = True
        self.add_to_enc_buffer = False

    # In eval mode (after reset_test_env) the policy always acts —
    # no random warm-up sampling.
    def _sample_action(
        self,
        learning_starts: int,
        action_noise: Optional[ActionNoise] = None,
        n_envs: int = 1,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sample an action according to the exploration policy.
        This is either done by sampling the probability distribution of the policy,
        or sampling a random action (from a uniform distribution over the action space)
        or by adding noise to the deterministic output.

        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :param n_envs:
        :return: action to take in the environment
            and scaled action that will be stored in the replay buffer.
            The two differs when the action space is not normalized (bounds are not [-1, 1]).
        """
        # Select action randomly or according to policy
        if not self.eval_env:
            if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
                # Warmup phase
                unscaled_action = np.array([self.action_space.sample() for _ in range(n_envs)])
            else:
                # Note: when using continuous actions,
                # we assume that the policy uses tanh to scale the action
                # We use non-deterministic action in the case of SAC, for TD3, it does not matter
                unscaled_action, _ = self.predict(self._last_obs, deterministic=False)
        else:
            # Evaluation: skip the random warm-up branch entirely.
            unscaled_action, _ = self.predict(self._last_obs, deterministic=False)

        # Rescale the action from [low, high] to [-1, 1]
        if isinstance(self.action_space, spaces.Box):
            scaled_action = self.policy.scale_action(unscaled_action)

            # Add noise to the action (improve exploration)
            if action_noise is not None:
                scaled_action = np.clip(scaled_action + action_noise(), -1, 1)

            # We store the scaled action in the buffer
            buffer_action = scaled_action
            action = self.policy.unscale_action(scaled_action)
        else:
            # Discrete case, no need to normalize or clip
            buffer_action = unscaled_action
            action = buffer_action
        return action, buffer_action
