from numpy import (
    ndarray,
    zeros,
    sum as np_sum,
    divide as np_divide,
    power as np_power,
    max as np_max,
    where as np_where,
    count_nonzero as np_count_nonzero,
)
from numpy.random import Generator, default_rng


class ExperienceBuffer:
    """Prioritized experience replay buffer.

    Experiences (dicts) live in a fixed-size ring buffer together with a
    per-slot priority. Sampling draws slots with probability proportional to
    priority and returns max-normalized importance-sampling weights to
    correct the bias that prioritized sampling introduces.
    """

    def __init__(
            self,
            rng: Generator,
            buffer_size: int,
            priority_scale_alpha: float,  # alpha=0: uniform sampling; alpha=1: fully prioritized
            importance_sampling_correction_beta: float,  # beta=1: full correction; beta=0: none
    ) -> None:
        """
        Initialize the experience buffer.

        Args:
            rng: random number generator (a ``numpy.random.Generator``)
            buffer_size: capacity of the ring buffer
            priority_scale_alpha: priority scaling exponent
            importance_sampling_correction_beta: importance-sampling correction exponent
        """
        self.rng: Generator = rng  # random number generator
        self.write_pointer: int = 0  # index of the next slot to overwrite

        self.buffer_size = buffer_size  # ring-buffer capacity
        # One independent dict per slot. The original ``[{}] * n`` repeated a
        # single shared dict object across all slots.
        self.buffer: list = [{} for _ in range(self.buffer_size)]
        self.priorities: ndarray = zeros(self.buffer_size, dtype='float32')  # per-slot priority
        self.probabilities: ndarray = zeros(self.buffer_size, dtype='float32')  # priority / sum(priorities)

        self.priority_scale_alpha: float = priority_scale_alpha  # priority scaling exponent
        self.importance_sampling_correction_beta: float = importance_sampling_correction_beta  # IS correction exponent
        # Floor priority: keeps every stored experience sampleable and doubles
        # as the "slot is filled" marker (empty slots stay at exactly 0).
        self.min_priority: float = 1e-20
        self.max_priority: float = self.min_priority  # running max, assigned to newly added experiences

    def get_len(self) -> int:
        """
        Return the number of valid experiences currently stored.

        Filled slots always carry priority >= min_priority > 0, so counting
        non-zero priorities counts filled slots.
        """
        return int(np_count_nonzero(self.priorities))

    def add_experience(
            self,
            experience: dict,
    ) -> None:
        """
        Add a new experience to the buffer, overwriting the oldest slot.

        Args:
            experience: experience as a dict (shallow-copied on insert so
                later caller-side mutations do not leak into the buffer)
        """
        self.buffer[self.write_pointer] = experience.copy()
        # New experiences get the current max priority so they are likely to
        # be sampled at least once before their priority is adjusted.
        self.priorities[self.write_pointer] = self.max_priority

        self.write_pointer = (self.write_pointer + 1) % self.buffer_size  # ring-buffer wrap-around

    def sample(
            self,
            batch_size: int,
    ) -> tuple[list, ndarray, ndarray]:
        """
        Sample a batch of experiences, prioritized by stored priority.

        Args:
            batch_size: number of experiences to sample (without replacement)

        Returns:
            tuple:
                - list: sampled experiences
                - ndarray: indices of the sampled experiences
                - ndarray: importance-sampling weights, max-normalized to (0, 1]

        Raises:
            ValueError: if the buffer is empty, or if batch_size exceeds the
                number of stored experiences (raised by ``rng.choice``).
        """
        # Refresh sampling probabilities.
        priority_sum = np_sum(self.priorities)
        if not priority_sum > 0:
            # Empty buffer: the division below would yield 0/0 = NaN
            # probabilities and fail opaquely inside rng.choice; fail fast.
            raise ValueError('cannot sample from an empty ExperienceBuffer')
        self.probabilities = np_divide(self.priorities, priority_sum, dtype='float32')

        sample_experience_ids = self.rng.choice(
            a=self.buffer_size,
            size=batch_size,
            replace=False,  # each experience appears at most once per batch
            p=self.probabilities,
        )

        sample_experiences = [self.buffer[ii] for ii in sample_experience_ids]
        sample_probabilities = self.probabilities[sample_experience_ids]

        # w_i = P(i)^(-beta), then normalized by the batch max so weights lie
        # in (0, 1]. (The constant 1/N factor of the PER formulation cancels
        # in this normalization.)
        sample_importance_weights = np_power(sample_probabilities,
                                             -self.importance_sampling_correction_beta, dtype='float32')
        sample_importance_weights = np_divide(sample_importance_weights,
                                              np_max(sample_importance_weights), dtype='float32')

        return (
            sample_experiences,
            sample_experience_ids,
            sample_importance_weights,
        )

    def adjust_priorities(
            self,
            experience_ids: ndarray,
            new_priorities: ndarray,
    ) -> None:
        """
        Adjust the priorities of the given experiences.

        Args:
            experience_ids: indices of the experiences to update
            new_priorities: new (unscaled) priorities; the scaling exponent
                alpha is applied here
        """
        scaled_priorities = np_power(new_priorities, self.priority_scale_alpha, dtype='float32')  # p^alpha
        # Clamp to the floor so updated experiences remain sampleable.
        self.priorities[experience_ids] = np_where(scaled_priorities > self.min_priority,
                                                   scaled_priorities, self.min_priority)

        # Track the running maximum (assigned to future insertions).
        sample_max_priority = np_max(scaled_priorities)
        if sample_max_priority > self.max_priority:
            self.max_priority = sample_max_priority

    def clear(
            self,
    ) -> None:
        """
        Reset the buffer to its freshly-initialized state.
        """
        self.write_pointer = 0

        self.buffer = [{} for _ in range(self.buffer_size)]
        self.priorities = zeros(self.buffer_size, dtype='float32')
        self.probabilities = zeros(self.buffer_size, dtype='float32')

        self.max_priority = self.min_priority
