from typing import Optional, Union
from gymnasium import spaces
from stable_baselines3 import HerReplayBuffer
from stable_baselines3.common.buffers import DictReplayBuffer,DictRolloutBuffer
from stable_baselines3.common.type_aliases import DictReplayBufferSamples
import torch as th
from stable_baselines3.common.vec_env import VecEnv, VecNormalize
from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy

import numpy as np

from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Protocol, SupportsFloat, Tuple, Union

# Mapping from observation key to its batched tensor representation.
TensorDict = Dict[str, th.Tensor]

class MultiTaskDictReplayBufferSamples(NamedTuple):
    """Batch of transitions sampled across several tasks/environments.

    Tensors are stacked along a leading meta-batch dimension, i.e. shapes
    are [meta_batch_size, batch_size, ...]. ``context`` holds, for every
    transition, a fixed-length window of past (reward, action, observation)
    rows built by ``MultiTaskDictReplayBuffer.get_context``.
    """

    observations: TensorDict
    context: th.Tensor
    actions: th.Tensor
    next_observations: TensorDict
    dones: th.Tensor
    rewards: th.Tensor

# Replay buffer variants for sampling across multiple environments/tasks
class MultiTaskHerReplayBuffer(HerReplayBuffer):
    """HER replay buffer that draws one per-environment minibatch per meta-task.

    :meth:`sample` cycles round-robin through the environment ids registered
    with :meth:`init_env_sample` and stacks the resulting per-environment
    batches along a leading meta-batch dimension.
    """

    def init_env_sample(self, env_ids: List[int]) -> None:
        """Register the environment ids to cycle through when sampling.

        :param env_ids: Ids (column indices into the buffer) of the
            vectorized environments to sample from.
        """
        self.env_ids = env_ids
        # Round-robin cursor into ``env_ids``; advanced once per meta-task.
        self.cur_env_id = 0

    def sample(self, meta_batch_size: int, batch_size: int, env: Optional[VecNormalize] = None) -> DictReplayBufferSamples:
        """
        Sample ``meta_batch_size`` per-environment minibatches from the buffer.

        Real and virtual (HER-relabelled) transitions are first drawn globally
        (as in ``HerReplayBuffer.sample``), then filtered per environment id so
        each meta-task only sees its own transitions. All meta-tasks are
        truncated to the same (smallest) per-environment count so the result
        can be stacked to ``[meta_batch_size, per_env_batch, ...]``.

        :param meta_batch_size: Number of environments (meta-tasks) to sample for
        :param batch_size: Number of transitions to sample globally before the
            per-environment split
        :param env: Associated VecEnv to normalize the observations/rewards when sampling
        :return: Samples stacked along a leading meta-batch dimension (note:
            a plain ``DictReplayBufferSamples`` — no context field is produced here)
        :raises RuntimeError: if no episode has finished yet
        :raises ValueError: if ``meta_batch_size`` is not positive
        """
        # When the buffer is full, we rewrite on old episodes. We don't want to
        # sample incomplete episode transitions, so we have to eliminate some indexes.
        is_valid = self.ep_length > 0
        if not np.any(is_valid):
            raise RuntimeError(
                "Unable to sample before the end of the first episode. We recommend choosing a value "
                "for learning_starts that is greater than the maximum number of timesteps in the environment."
            )
        # Flat indices of valid transitions; np.unravel_index recovers the
        # (time index, env index) pairs from is_valid's (buffer_size, n_envs) shape.
        valid_indices = np.flatnonzero(is_valid)
        sampled_indices = np.random.choice(valid_indices, size=batch_size, replace=True)
        batch_indices, env_indices = np.unravel_index(sampled_indices, is_valid.shape)

        # Split the indexes between real and virtual (goal-relabelled) transitions.
        nb_virtual = int(self.her_ratio * batch_size)
        virtual_batch_indices, real_batch_indices = np.split(batch_indices, [nb_virtual])
        virtual_env_indices, real_env_indices = np.split(env_indices, [nb_virtual])

        # Every meta-task must contribute the same number of transitions so the
        # per-task batches can be stacked: use the smallest per-env count.
        min_sample_num = batch_size
        for env_id in range(self.n_envs):
            cur_num = int(np.sum(virtual_env_indices == env_id) + np.sum(real_env_indices == env_id))
            min_sample_num = min(min_sample_num, cur_num)

        real_data, virtual_data = [], []
        for _ in range(meta_batch_size):
            # Round-robin over the registered environment ids.
            env_id = self.env_ids[self.cur_env_id % len(self.env_ids)]

            _virtual_batch_indices = virtual_batch_indices[virtual_env_indices == env_id]
            _real_batch_indices = real_batch_indices[real_env_indices == env_id]

            # Truncate to the common size while keeping the real/virtual ratio.
            total = len(_virtual_batch_indices) + len(_real_batch_indices)
            if total > min_sample_num:
                _virtual_num = int(len(_virtual_batch_indices) / total * min_sample_num)
                _virtual_batch_indices = _virtual_batch_indices[:_virtual_num]
                _real_batch_indices = _real_batch_indices[: min_sample_num - _virtual_num]

            _real_env_indices = np.full(_real_batch_indices.shape, env_id, dtype=int)
            _virtual_env_indices = np.full(_virtual_batch_indices.shape, env_id, dtype=int)
            # Real transitions as stored; virtual ones get new desired goals
            # and recomputed rewards (HER).
            real_data.append(self._get_real_samples(_real_batch_indices, _real_env_indices, env))
            virtual_data.append(self._get_virtual_samples(_virtual_batch_indices, _virtual_env_indices, env))
            self.cur_env_id += 1

        if not real_data:
            # Previously this fell through to a NameError on an undefined loop variable.
            raise ValueError("meta_batch_size must be a positive integer")

        def _stack(getter) -> th.Tensor:
            # Concatenate real+virtual within each task, then stack the tasks:
            # result shape is [meta_batch_size, per_env_batch, ...].
            return th.stack(
                [th.cat((getter(r), getter(v)), dim=0) for r, v in zip(real_data, virtual_data)],
                dim=0,
            )

        observation_keys = real_data[0].observations.keys()
        # ``k=key`` binds the key at definition time (avoids late-binding bugs).
        observations = {key: _stack(lambda d, k=key: d.observations[k]) for key in observation_keys}
        next_observations = {key: _stack(lambda d, k=key: d.next_observations[k]) for key in observation_keys}

        return DictReplayBufferSamples(
            observations=observations,
            actions=_stack(lambda d: d.actions),
            next_observations=next_observations,
            dones=_stack(lambda d: d.dones),
            rewards=_stack(lambda d: d.rewards),
        )

    def set_env(self, env: VecEnv) -> None:
        """
        Sets the environment, skipping any base-class consistency checks.

        :param env: New vectorized environment to associate with the buffer.
        """

        self.env = env

class MultiTaskDictReplayBuffer(DictReplayBuffer):
    """Dict replay buffer that augments samples with a per-transition context.

    Each sampled transition carries up to ``context_length`` past
    ``[reward, action, observation]`` rows from the same episode, zero-padded
    to a fixed length, for context-conditioned meta-RL.
    """

    # Number of past transitions collected into each context window.
    context_length: int = 50

    def init_env_sample(self, env_ids: List[int]) -> None:
        """Register the environment ids to cycle through when sampling.

        :param env_ids: Ids (column indices into the buffer) of the
            vectorized environments to sample from.
        """
        self.env_ids = env_ids
        # Round-robin cursor into ``env_ids``; advanced once per meta-task.
        self.cur_env_id = 0

    def sample(self, meta_batch_size: int, batch_size: int, env: Optional[VecNormalize] = None) -> MultiTaskDictReplayBufferSamples:
        """
        :param meta_batch_size: Number of envs (meta-tasks) to sample
        :param batch_size: Number of elements to sample per env
        :param env: associated gym VecEnv
            to normalize the observations/rewards when sampling
        :return: Samples stacked to ``[meta_batch_size, batch_size, ...]``
        :raises ValueError: if ``meta_batch_size`` is not positive
        """
        upper_bound = self.buffer_size if self.full else self.pos
        # The same time indices are reused for every env; only the env column changes.
        batch_inds = np.random.randint(0, upper_bound, size=batch_size)

        data = []
        for _ in range(meta_batch_size):
            # Round-robin over the registered environment ids.
            env_id = self.env_ids[self.cur_env_id % len(self.env_ids)]
            env_indices = np.full(batch_inds.shape, env_id, dtype=int)
            data.append(self._get_samples(env_indices, batch_inds, env))
            self.cur_env_id += 1

        if not data:
            # Previously this fell through to a NameError on an undefined loop variable.
            raise ValueError("meta_batch_size must be a positive integer")

        observation_keys = data[0].observations.keys()
        # [meta_batch_size, batch_size, dim]
        observations = {
            key: th.stack([sample.observations[key] for sample in data], dim=0)
            for key in observation_keys
        }
        next_observations = {
            key: th.stack([sample.next_observations[key] for sample in data], dim=0)
            for key in observation_keys
        }

        return MultiTaskDictReplayBufferSamples(
            observations=observations,
            context=th.stack([sample.context for sample in data], dim=0),
            actions=th.stack([sample.actions for sample in data], dim=0),
            next_observations=next_observations,
            dones=th.stack([sample.dones for sample in data], dim=0),
            rewards=th.stack([sample.rewards for sample in data], dim=0),
        )

    def _get_samples(self, env_indices: np.ndarray, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> MultiTaskDictReplayBufferSamples:
        """Gather and normalize one minibatch for a single environment.

        NOTE(review): the (env_indices, batch_inds) argument order differs
        from the base-class ``_get_samples(batch_inds, env)`` — this override
        is only called from :meth:`sample` above, which uses that order.
        """
        # Normalize if needed; (time, env) fancy indexing selects one env per row.
        obs_ = self._normalize_obs({key: obs[batch_inds, env_indices, :] for key, obs in self.observations.items()}, env)
        next_obs_ = self._normalize_obs(
            {key: obs[batch_inds, env_indices, :] for key, obs in self.next_observations.items()}, env
        )

        # Build the fixed-length context window for every sampled transition.
        context = np.stack(
            [self.get_context(batch_id, env_id) for batch_id, env_id in zip(batch_inds, env_indices)],
            axis=0,
        )

        # Convert to torch tensors.
        observations = {key: self.to_torch(obs) for key, obs in obs_.items()}
        next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}

        return MultiTaskDictReplayBufferSamples(
            observations=observations,
            context=self.to_torch(context),
            actions=self.to_torch(self.actions[batch_inds, env_indices]),
            next_observations=next_observations,
            # Only use dones that are not due to timeouts
            # deactivated by default (timeouts is initialized as an array of False)
            dones=self.to_torch(
                self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])
            ).reshape(-1, 1),
            rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env)),
        )

    def get_context(self, batch_id: int, env_id: int) -> np.ndarray:
        """Collect up to ``context_length`` past transitions for one sample.

        Walks backwards from ``batch_id`` within the same episode — stopping
        at the start of the buffer or at a ``done`` flag on a strictly earlier
        step — encodes each step as ``[reward, action, observation]`` and
        zero-pads the result to ``(context_length, row_dim)``. Row 0 (the
        sampled transition itself) is zeroed out (masked).

        NOTE(review): indices are not wrapped around the circular buffer, so
        when the buffer is full the walk stops at index 0 instead of
        continuing from the buffer's end — confirm this is intended.
        """
        window = int(self.context_length)
        rows = []
        for offset in range(0, -window, -1):
            past_id = batch_id + offset
            # Stop at the buffer start, or when crossing an episode boundary
            # (a done at an earlier step ends the walk; offset == 0 never breaks).
            if past_id < 0 or (offset < 0 and self.dones[past_id, env_id]):
                break

            obs_parts = []
            for key in self.observations:
                # 'task_z' is excluded from the context encoding.
                if key == 'task_z':
                    continue
                obs_parts.append(self.observations[key][past_id, env_id])
            flat_obs = np.concatenate(obs_parts, axis=0)

            rows.append(
                np.concatenate(
                    (np.array([self.rewards[past_id, env_id]]), self.actions[past_id, env_id], flat_obs),
                    axis=0,
                )
            )

        context = np.stack(rows, axis=0)
        # Zero-pad to the fixed window length so contexts can be stacked.
        context = np.concatenate((context, np.zeros((window - context.shape[0], context.shape[1]))), axis=0)
        context[0] = 0  # the sampled transition is masked out of its own context
        return context
