import collections
import typing
from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Tuple
import numpy as np


class Experience(typing.NamedTuple):
    """A single environment step stored in replay.

    Grouped per game via `game_id` (see `NStepTransitionAccumulator`).
    """
    state_last: tuple[np.ndarray, np.ndarray]  # Previous state (pair of arrays).
    action: int  # Action taken in `state_last`.
    reward: float  # Reward received for this step.
    state: tuple[np.ndarray, np.ndarray]  # Resulting state after `action`.
    game_id: str  # Identifier of the game/episode this step belongs to.
    round_num: int  # Round number within the game; not read elsewhere in this file.


class Transition(typing.NamedTuple):
    """An (s, a, r, discount, s') transition.

    Fields are Optional; presumably None marks missing pieces at episode
    boundaries — not used elsewhere in this file, so confirm with callers.
    """
    s_tm1: Optional[np.ndarray]  # State at t-1.
    a_tm1: int  # Action taken at t-1.
    r_t: Optional[float]  # Reward received at t.
    discount_t: Optional[float]  # Discount applied at t.
    s_t: Optional[np.ndarray]  # State at t.


def _power(base, exponent) -> np.ndarray:
    """Same as usual power except `0 ** 0` is zero."""
    # By default, 0 ** 0 is 1, but we never want indices with priority zero to be
    # sampled, even if the priority exponent is zero.
    base = np.asarray(base)
    return np.where(base == 0.0, 0.0, base**exponent)


def importance_sampling_weights(
    probabilities: np.ndarray,
    uniform_probability: float,
    exponent: float,
    normalize: bool,
) -> np.ndarray:
    """Computes importance sampling weights from sampling probabilities.

    Args:
        probabilities: Sampling probabilities for a subset of items; being a
            subset, they typically do not sum to `1`.
        uniform_probability: Probability each item would have under uniform
            sampling.
        exponent: How much correction to apply: `1` corrects fully, `0` applies
            none (all weights become `1`).
        normalize: If true, rescale so the maximum weight is `1`; weights only
            shrink, which can help stability.

    Returns:
        Importance sampling weights with the same shape as `probabilities`,
        suitable for scaling a loss.

    Raises:
        ValueError: if `exponent` or `uniform_probability` lies outside [0, 1],
            or if any resulting weight is not finite.
    """
    if not 0.0 <= exponent <= 1.0:
        raise ValueError('Require 0 <= exponent <= 1.')
    if not 0.0 <= uniform_probability <= 1.0:
        raise ValueError('Expected 0 <= uniform_probability <= 1.')

    ratios = uniform_probability / probabilities
    weights = ratios**exponent
    if normalize:
        weights = weights / np.max(weights)
    if not np.all(np.isfinite(weights)):
        raise ValueError('Weights are not finite: %s.' % weights)
    return weights


class SumTree:
    """A binary tree where non-leaf nodes are the sum of child nodes.

    Leaf nodes contain non-negative floats and are set externally. Non-leaf nodes
    are the sum of their children. This data structure allows O(log n) updates and
    O(log n) queries of which index corresponds to a given sum. The main use
    case is sampling from a multinomial distribution with many probabilities
    which are updated a few at a time.
    """

    def __init__(self):
        """Initializes an empty `SumTree`."""
        # When there are n values, the storage array will have size 2 * n. The first
        # n elements are non-leaf nodes (ignoring the very first element), with
        # index 1 corresponding to the root node. The next n elements are leaf nodes
        # that contain values. A non-leaf node with index i has children at
        # locations 2 * i, 2 * i + 1.
        self._size = 0
        self._storage = np.zeros(0, dtype=np.float64)
        self._first_leaf = 0  # Boundary between non-leaf and leaf nodes.

    def resize(self, size: int) -> None:
        """Resizes tree, truncating or expanding with zeros as needed.

        Args:
          size: New number of leaf values; must be >= 0.
        """
        self._initialize(size, values=None)

    def get(self, indices: Sequence[int]) -> np.ndarray:
        """Gets values corresponding to given indices.

        Args:
          indices: Leaf indices, each required to be in [0, size).

        Returns:
          Array of leaf values at `indices`.

        Raises:
          IndexError: if any index is out of range.
        """
        indices = np.asarray(indices)
        if not ((0 <= indices) & (indices < self.size)).all():
            raise IndexError('index out of range, expect 0 <= index < %s' % self.size)
        return self.values[indices]

    def set(self, indices: Sequence[int], values: Sequence[float]) -> None:
        """Sets values at the given indices and refreshes ancestor sums.

        Args:
          indices: Leaf indices to update.
          values: New leaf values; must be finite and non-negative.

        Raises:
          ValueError: if any value is negative or not finite.
        """
        values = np.asarray(values)
        # NOTE(review): zero values are accepted here despite the message saying
        # "positive"; only negative or non-finite values are rejected.
        if not np.isfinite(values).all() or (values < 0.0).any():
            raise ValueError('value must be finite and positive.')
        self.values[indices] = values
        storage = self._storage
        # Walk from each updated leaf up to the root, recomputing every
        # ancestor as the sum of its two children.
        for idx in np.asarray(indices) + self._first_leaf:
            parent = idx // 2
            while parent > 0:
                # At this point the subtree with root parent is consistent.
                storage[parent] = storage[2 * parent] + storage[2 * parent + 1]
                parent //= 2

    def set_all(self, values: Sequence[float]) -> None:
        """Sets many values all at once, also setting size of the sum tree.

        Args:
          values: New leaf values; must be finite and non-negative.

        Raises:
          ValueError: if any value is negative or not finite.
        """
        values = np.asarray(values)
        if not np.isfinite(values).all() or (values < 0.0).any():
            raise ValueError('Values must be finite positive numbers.')
        self._initialize(len(values), values)

    def query(self, targets: Sequence[float]) -> Sequence[int]:
        """Finds smallest indices where `target <` cumulative value sum up to index.

        Args:
          targets: The target sums.

        Returns:
          For each target, the smallest index such that target is strictly less than
          the cumulative sum of values up to and including that index.

        Raises:
          ValueError: if `target >=` sum of all values or `target < 0` for any
            of the given targets.
        """
        return [self._query_single(t) for t in targets]

    def root(self) -> float:
        """Returns sum of values, or NaN if the tree is empty."""
        return self._storage[1] if self.size > 0 else np.nan

    @property
    def values(self) -> np.ndarray:
        """View of array containing all (leaf) values in the sum tree."""
        # A view, not a copy: writes through it must be followed by an
        # ancestor-sum update (see `set`).
        return self._storage[self._first_leaf : self._first_leaf + self.size]

    @property
    def size(self) -> int:
        """Number of (leaf) values in the sum tree."""
        return self._size

    @property
    def capacity(self) -> int:
        """Current sum tree capacity (exceeding it will trigger resizing)."""
        return self._first_leaf

    def get_state(self) -> Mapping[str, Any]:
        """Retrieves sum tree state as a dictionary (e.g. for serialization)."""
        return {
            'size': self._size,
            'storage': self._storage,
            'first_leaf': self._first_leaf,
        }

    def set_state(self, state: Mapping[str, Any]) -> None:
        """Sets sum tree state from a (potentially de-serialized) dictionary."""
        self._size = state['size']
        self._storage = state['storage']
        self._first_leaf = state['first_leaf']

    def check_valid(self) -> Tuple[bool, str]:
        """Checks internal consistency.

        Returns:
          A `(is_valid, error_message)` pair; the message is empty when valid.
        """
        if len(self._storage) != 2 * self._first_leaf:
            return False, 'first_leaf should be half the size of storage.'
        if not 0 <= self.size <= self.capacity:
            return False, 'Require 0 <= self.size <= self.capacity.'
        if len(self.values) != self.size:
            return False, 'Number of values should be equal to the size.'
        storage = self._storage
        for i in range(1, self._first_leaf):
            if storage[i] != storage[2 * i] + storage[2 * i + 1]:
                return False, 'Non-leaf node %d should be sum of child nodes.' % i
        return True, ''

    def _initialize(self, size: int, values: Optional[Sequence[float]]) -> None:
        """Resizes storage and sets new values if supplied.

        Args:
          size: New number of leaf values.
          values: Optional new leaf values of length `size`; when None,
            existing values are kept (truncated or zero-extended as needed).
        """
        assert size >= 0
        assert values is None or len(values) == size

        if size < self.size:  # Keep storage and values, zero out extra values.
            if values is None:
                new_values = self.values[:size]  # Truncate existing values.
            else:
                new_values = values
            self._size = size
            self._set_values(new_values)
            # self._first_leaf remains the same.
        elif size <= self.capacity:  # Reuse same storage, but size increases.
            self._size = size
            if values is not None:
                self._set_values(values)
            # self._first_leaf remains the same.
            # New activated leaf nodes are already zero and sum nodes already correct.
        else:  # Allocate new storage.
            # Capacity is kept a power of two so the tree stays complete.
            new_capacity = 1
            while new_capacity < size:
                new_capacity *= 2
            new_storage = np.empty((2 * new_capacity,), dtype=np.float64)
            if values is None:
                new_values = self.values
            else:
                new_values = values
            self._storage = new_storage
            self._first_leaf = new_capacity
            self._size = size
            self._set_values(new_values)

    def _set_values(self, values: Sequence[float]) -> None:
        """Sets values assuming storage has enough capacity and update sums."""
        # Note every part of the storage is set here.
        assert len(values) <= self.capacity
        storage = self._storage
        storage[self._first_leaf : self._first_leaf + len(values)] = values
        storage[self._first_leaf + len(values) :] = 0
        # Rebuild all non-leaf sums bottom-up; children always have larger
        # indices than their parent, so a reverse scan suffices.
        for i in range(self._first_leaf - 1, 0, -1):
            storage[i] = storage[2 * i] + storage[2 * i + 1]
        storage[0] = 0.0  # Unused.

    def _query_single(self, target: float) -> int:
        """Queries a single target, see query for more detailed documentation."""
        if not 0.0 <= target < self.root():
            raise ValueError('Require 0 <= target < total sum.')

        storage = self._storage
        idx = 1  # Root node.
        while idx < self._first_leaf:
            # At this point we always have target < storage[idx].
            assert target < storage[idx]
            left_idx = 2 * idx
            right_idx = left_idx + 1
            left_sum = storage[left_idx]
            if target < left_sum:
                idx = left_idx
            else:
                # Descend right; subtract the left subtree's mass so `target`
                # stays relative to the current subtree.
                idx = right_idx
                target -= left_sum

        assert idx < 2 * self.capacity
        return idx - self._first_leaf


class PrioritizedDistribution:
    """Distribution for weighted sampling of user-defined integer IDs."""

    def __init__(
            self,
            priority_exponent: float,
            uniform_sample_probability: float,
            random_state: np.random.RandomState,
            min_capacity: int = 0,
            max_capacity: Optional[int] = None,
    ):
        """Initializes the distribution.

        Args:
          priority_exponent: Exponent >= 0 applied to raw priorities before
            storing them; zero makes all nonzero priorities equal.
          uniform_sample_probability: Probability in [0, 1] with which a sample
            is drawn uniformly from active IDs instead of by priority.
          random_state: Source of randomness for sampling.
          min_capacity: Initial capacity to preallocate.
          max_capacity: Optional hard cap on capacity; None means unbounded.

        Raises:
          ValueError: if any argument is outside its valid range.
        """
        if priority_exponent < 0.0:
            raise ValueError('Require priority_exponent >= 0.')
        self._priority_exponent = priority_exponent
        if not 0.0 <= uniform_sample_probability <= 1.0:
            raise ValueError('Require 0 <= uniform_sample_probability <= 1.')
        if max_capacity is not None and max_capacity < min_capacity:
            raise ValueError('Require max_capacity >= min_capacity.')
        if min_capacity < 0:
            raise ValueError('Require min_capacity >= 0.')
        self._uniform_sample_probability = uniform_sample_probability
        self._max_capacity = max_capacity
        self._sum_tree = SumTree()
        self._sum_tree.resize(min_capacity)
        self._random_state = random_state
        self._id_to_index = {}  # User ID -> sum tree index.
        self._index_to_id = {}  # Sum tree index -> user ID.
        # Unused sum tree indices that can be allocated to new user IDs.
        self._inactive_indices = list(range(min_capacity))
        # Currently used sum tree indices, needed for uniform sampling.
        self._active_indices = []
        # Maps an active index to its location in active_indices_, for removal.
        self._active_indices_location = {}

    def ensure_capacity(self, capacity: int) -> None:
        """Ensures sufficient capacity, a no-op if capacity is already enough.

        Args:
          capacity: Desired minimum number of storable IDs.

        Raises:
          ValueError: if `capacity` exceeds `max_capacity`.
        """
        if self._max_capacity is not None and capacity > self._max_capacity:
            raise ValueError(
                'capacity %d cannot exceed max_capacity %d'
                % (capacity, self._max_capacity)
            )
        if capacity <= self._sum_tree.size:
            return  # There is already sufficient capacity.
        # Newly created leaf indices start out inactive.
        self._inactive_indices.extend(range(self._sum_tree.size, capacity))
        self._sum_tree.resize(capacity)

    def add_priorities(
            self, ids: Sequence[int], priorities: Sequence[float]
    ) -> None:
        """Add priorities for new IDs.

        Args:
          ids: New IDs, none of which may already exist.
          priorities: Raw priorities, one per ID; the stored value is
            `priority ** priority_exponent` (with `0 ** 0 == 0`).

        Raises:
          IndexError: if an ID already exists.
          ValueError: if adding the IDs would exceed `max_capacity`.
        """
        for i in ids:
            if i in self._id_to_index:
                raise IndexError('ID %d already exists.' % i)

        new_size = self.size + len(ids)
        if self._max_capacity is not None and new_size > self._max_capacity:
            raise ValueError('Cannot add IDs as max capacity would be exceeded.')

        # Expand to accommodate new IDs if needed. Doubling amortizes the cost
        # of repeated single additions.
        if new_size > self.capacity:
            candidate_capacity = max(new_size, 2 * self.capacity)
            if self._max_capacity is None:
                new_capacity = candidate_capacity
            else:
                new_capacity = min(self._max_capacity, candidate_capacity)
            self.ensure_capacity(new_capacity)

        # Assign unused indices to IDs.
        indices = []
        for i in ids:
            idx = self._inactive_indices.pop()
            self._active_indices_location[idx] = len(self._active_indices)
            self._active_indices.append(idx)
            self._id_to_index[i] = idx
            self._index_to_id[idx] = i
            indices.append(idx)

        # Set priorities on sum tree.
        self._sum_tree.set(indices, _power(priorities, self._priority_exponent))

    def remove_priorities(self, ids: Sequence[int]) -> None:
        """Remove priorities associated with given IDs.

        Args:
          ids: IDs to remove; all must currently exist.

        Raises:
          IndexError: if any ID does not exist.
        """
        indices = []
        for i in ids:
            try:
                idx = self._id_to_index[i]
            # Bug fix: dict lookups raise KeyError, not IndexError, so the
            # previous `except IndexError` never matched and a raw KeyError
            # escaped instead of the intended message.
            except KeyError as err:
                raise IndexError('Cannot remove ID %d, it does not exist.' % i) from err
            indices.append(idx)

        for i, idx in zip(ids, indices):
            del self._id_to_index[i]
            del self._index_to_id[idx]
            # Swap index to be removed with index at the end.
            j = self._active_indices_location[idx]
            self._active_indices[j], self._active_indices[-1] = (
                self._active_indices[-1],
                self._active_indices[j],
            )
            # Update location for the swapped index.
            self._active_indices_location[self._active_indices[j]] = j
            # Remove index from data structures.
            self._active_indices_location.pop(self._active_indices.pop())

        self._inactive_indices.extend(indices)
        # Zero priorities so removed indices can never be sampled.
        self._sum_tree.set(indices, np.zeros((len(indices),), dtype=np.float64))

    def update_priorities(
            self, ids: list[int], priorities: list[float]
    ) -> None:
        """Updates priorities for existing IDs; unknown IDs are skipped.

        Args:
          ids: IDs whose priorities should change.
          priorities: Raw priorities, aligned with `ids`.
        """
        # Build filtered, aligned lists instead of popping from the caller's
        # `priorities` list: the old implementation mutated the argument in
        # place, a surprising side effect for callers reusing that list.
        indices = []
        kept_priorities = []
        for i, priority in zip(ids, priorities):
            idx = self._id_to_index.get(i)
            if idx is None:
                continue  # ID no longer tracked (e.g. evicted); ignore it.
            indices.append(idx)
            kept_priorities.append(priority)
        self._sum_tree.set(indices, _power(kept_priorities, self._priority_exponent))

    def sample(self, size: int) -> Tuple[np.ndarray, np.ndarray]:
        """Returns sample of IDs with corresponding probabilities.

        Each draw is uniform with probability `uniform_sample_probability`,
        otherwise proportional to stored priorities.

        Args:
          size: Number of IDs to sample (with replacement).

        Returns:
          A `(ids, probabilities)` pair of arrays of length `size`.

        Raises:
          RuntimeError: if no IDs are currently tracked.
        """
        if self.size == 0:
            raise RuntimeError('No IDs to sample.')
        uniform_indices = [
            self._active_indices[j]
            for j in self._random_state.randint(self.size, size=size)
        ]

        if self._sum_tree.root() == 0.0:
            # All priorities are zero; fall back to uniform sampling.
            prioritized_indices = uniform_indices
        else:
            targets = self._random_state.uniform(size=size) * self._sum_tree.root()
            prioritized_indices = np.asarray(self._sum_tree.query(targets))

        usp = self._uniform_sample_probability
        indices = np.where(
            self._random_state.uniform(size=size) < usp,
            uniform_indices,
            prioritized_indices,
            )

        uniform_prob = np.asarray(1.0 / self.size)  # np.asarray is for pytype.
        priorities = self._sum_tree.get(indices)

        if self._sum_tree.root() == 0.0:
            prioritized_probs = np.full_like(priorities, fill_value=uniform_prob)
        else:
            prioritized_probs = priorities / self._sum_tree.root()

        # Mixture probability of the two sampling modes, needed for
        # importance sampling corrections.
        sample_probs = (1.0 - usp) * prioritized_probs + usp * uniform_prob
        ids = np.fromiter(
            (self._index_to_id[idx] for idx in indices),
            dtype=np.int64,
            count=len(indices),
        )
        return ids, sample_probs

    def get_exponentiated_priorities(self, ids: Sequence[int]) -> Sequence[float]:
        """Returns priority ** priority_exponent for the given IDs.

        Args:
          ids: IDs to look up; all must currently exist.
        """
        indices = np.fromiter(
            (self._id_to_index[i] for i in ids), dtype=np.int64, count=len(ids)
        )
        return self._sum_tree.get(indices)

    def ids(self) -> Iterable[int]:
        """Returns an iterable of all current IDs."""
        return self._id_to_index.keys()

    @property
    def capacity(self) -> int:
        """Number of IDs that can be stored until memory needs to be allocated."""
        return self._sum_tree.size

    @property
    def size(self) -> int:
        """Number of IDs currently tracked."""
        return len(self._id_to_index)

    def get_state(self) -> Mapping[str, Any]:
        """Retrieves distribution state as a dictionary (e.g. for serialization)."""
        return {
            'sum_tree': self._sum_tree.get_state(),
            'id_to_index': self._id_to_index,
            'index_to_id': self._index_to_id,
            'inactive_indices': self._inactive_indices,
            'active_indices': self._active_indices,
            'active_indices_location': self._active_indices_location,
        }

    def set_state(self, state: Mapping[str, Any]) -> None:
        """Sets distribution state from a (potentially de-serialized) dictionary."""
        self._sum_tree.set_state(state['sum_tree'])
        self._id_to_index = state['id_to_index']
        self._index_to_id = state['index_to_id']
        self._inactive_indices = state['inactive_indices']
        self._active_indices = state['active_indices']
        self._active_indices_location = state['active_indices_location']

    def check_valid(self) -> Tuple[bool, str]:
        """Checks internal consistency.

        Returns:
          A `(is_valid, error_message)` pair; the message is empty when valid.
        """
        if len(self._id_to_index) != len(self._index_to_id):
            return False, 'ID to index maps are not the same size.'
        for i in self._id_to_index:
            if self._index_to_id[self._id_to_index[i]] != i:
                return False, 'ID %d should map to itself.' % i
        # Indices map to themselves because of previous check and uniqueness.
        if len(set(self._inactive_indices)) != len(self._inactive_indices):
            return False, 'Inactive indices should be unique.'
        if len(set(self._active_indices)) != len(self._active_indices):
            return False, 'Active indices should be unique.'
        if set(self._active_indices) != set(self._index_to_id.keys()):
            return False, 'Active indices should match index to ID mapping keys.'
        all_indices = self._inactive_indices + self._active_indices
        if sorted(all_indices) != list(range(self._sum_tree.size)):
            return False, 'Inactive and active indices should partition all indices.'
        if len(self._active_indices) != len(self._active_indices_location):
            return False, 'Active indices and their location should be the same size.'
        for j, i in enumerate(self._active_indices):
            if j != self._active_indices_location[i]:
                return False, (
                        'Active index location %d not correct for index %d.' % (j, i)
                )

        return self._sum_tree.check_valid()


class PrioritizedTransitionReplay:
    """Prioritized replay, with LIFO storage for flat named tuples.

    This is the proportional variant as described in
    http://arxiv.org/abs/1511.05952.
    """

    def __init__(
            self,
            capacity: int,
            priority_exponent: float,
            importance_sampling_exponent: Callable[[int], float],
            uniform_sample_probability: float,
            normalize_weights: bool,
            random_state: np.random.RandomState,
            encoder: Optional[Callable[[Experience], Any]] = None,
            decoder: Optional[Callable[[Any], Experience]] = None,
    ):
        """Initializes the replay buffer.

        Args:
          capacity: Maximum number of items held; the oldest is evicted first.
          priority_exponent: Exponent applied to raw priorities (see
            `PrioritizedDistribution`).
          importance_sampling_exponent: Function mapping the current step count
            to the importance sampling exponent.
          uniform_sample_probability: Probability of sampling uniformly instead
            of by priority.
          normalize_weights: Whether to rescale importance weights so the
            maximum is 1.
          random_state: Source of randomness for sampling.
          encoder: Optional transform applied to items before storing
            (defaults to identity).
          decoder: Optional inverse transform applied when retrieving
            (defaults to identity).
        """
        self._capacity = capacity
        self._random_state = random_state
        self._encoder = encoder or (lambda s: s)
        self._decoder = decoder or (lambda s: s)
        self._distribution = PrioritizedDistribution(
            min_capacity=capacity,
            max_capacity=capacity,
            priority_exponent=priority_exponent,
            uniform_sample_probability=uniform_sample_probability,
            random_state=random_state,
        )
        self._importance_sampling_exponent = importance_sampling_exponent
        self._normalize_weights = normalize_weights
        # OrderedDict so the oldest inserted item can be popped in O(1).
        self._storage = collections.OrderedDict()  # ID -> item.
        self._t = 0  # Used to allocate IDs.

    def add(self, item: Experience, priority: float) -> None:
        """Adds a single item with a given priority to the replay buffer."""
        # At capacity: evict the oldest item before inserting the new one.
        if self.size == self._capacity:
            oldest_id, _ = self._storage.popitem(last=False)
            self._distribution.remove_priorities([oldest_id])

        item_id = self._t
        self._distribution.add_priorities([item_id], [priority])
        self._storage[item_id] = self._encoder(item)
        self._t += 1

    def get(self, ids: Sequence[int]) -> Iterable[Experience]:
        """Retrieves items by IDs.

        Raises:
          KeyError: if an ID is not present in storage.
        """
        for i in ids:
            yield self._decoder(self._storage[i])

    def sample(
            self,
            size: int,
    ) -> Tuple[list[Experience], list[int], np.ndarray]:
        """Samples a batch of transitions.

        Args:
          size: Batch size.

        Returns:
          A `(batch, ids, weights)` triple: sampled experiences, their replay
          IDs (for later priority updates), and importance sampling weights.
        """
        ids, probabilities = self._distribution.sample(size)
        ids = ids.astype(int).tolist()
        weights = importance_sampling_weights(
            probabilities,
            uniform_probability=1.0 / self.size,
            exponent=self.importance_sampling_exponent,
            normalize=self._normalize_weights,
        )
        # Return the raw list of samples directly -- no stacking is performed.
        batch = list(self.get(ids))  # Returns a List[Experience].
        return batch, ids, weights

    def update_priorities(
            self, ids: list[int], priorities: list[float]
    ) -> None:
        """Updates IDs with given priorities."""
        self._distribution.update_priorities(ids, priorities)

    @property
    def size(self) -> int:
        """Number of elements currently contained in replay."""
        return len(self._storage)

    @property
    def capacity(self) -> int:
        """Total capacity of replay (maximum number of items that can be stored)."""
        return self._capacity

    @property
    def importance_sampling_exponent(self):
        """Importance sampling exponent at current step."""
        return self._importance_sampling_exponent(self._t)

    def get_state(self) -> Mapping[str, Any]:
        """Retrieves replay state as a dictionary (e.g. for serialization)."""
        return {
            # Serialize OrderedDict as a simpler, more common data structure.
            'storage': list(self._storage.items()),
            't': self._t,
            'distribution': self._distribution.get_state(),
        }

    def set_state(self, state: Mapping[str, Any]) -> None:
        """Sets replay state from a (potentially de-serialized) dictionary."""
        self._storage = collections.OrderedDict(state['storage'])
        self._t = state['t']
        self._distribution.set_state(state['distribution'])

    def check_valid(self) -> Tuple[bool, str]:
        """Checks internal consistency.

        Returns:
          A `(is_valid, error_message)` pair; the message is empty when valid.
        """
        if self._t < len(self._storage):
            return False, 't should be >= storage size.'
        if set(self._storage.keys()) != set(self._distribution.ids()):
            return False, 'IDs in storage and distribution do not match.'
        return self._distribution.check_valid()


class NStepTransitionAccumulator:
    def __init__(self, n: int, discount: float):
        self.max_len = n
        self.experiences = {}
        self._discount = discount

    def add_experience(self, experience: Experience) -> Experience | None:
        ans = None
        if experience.game_id not in self.experiences:
            self.experiences[experience.game_id] = []
        temp = self.experiences[experience.game_id]
        if len(temp) >= self.max_len:
            ans = temp.pop(0)
        temp.append(experience)
        for i in range(1, len(temp)):
            tup = temp[len(temp)-i-1]
            rew = tup.reward + experience.reward * (self._discount ** i)
            temp[len(temp)-i-1] = Experience(tup[0], tup[1], rew, tup[3], tup[4], tup[5])
        return ans


