from collections import deque
import numpy as np


class MultiStepReplayBuffer:
    """Replay buffer supporting multi-step returns and prioritized replay.

    Raw transitions are accumulated in a sliding window of length
    ``n_step``; once the window is full (or the episode terminates) the
    discounted n-step return is computed and the aggregated transition is
    written into a fixed-size circular buffer.

    When ``use_prioritized`` is True, sampling is proportional to stored
    priorities (raised to ``alpha``) and importance-sampling weights are
    returned alongside the batch.
    """

    def __init__(self, capacity: int, n_step: int, gamma: float,
                 use_prioritized: bool = False, alpha: float = 0.6):
        """
        Args:
            capacity: Maximum number of aggregated transitions kept.
            n_step: Number of steps over which rewards are accumulated.
            gamma: Discount factor applied within the n-step return.
            use_prioritized: Enable proportional prioritized replay.
            alpha: Priority exponent (0 = uniform, 1 = fully prioritized).
        """
        self.capacity = capacity
        self.n_step = n_step
        self.gamma = gamma
        self.use_prioritized = use_prioritized
        self.alpha = alpha

        # List used as a circular buffer for aggregated transitions.
        self.buffer = [None] * capacity
        # Sliding window of the most recent raw transitions; maxlen makes
        # the deque evict the oldest entry automatically on append.
        self.n_step_buffer = deque(maxlen=n_step)
        self.position = 0       # next write index in the circular buffer
        self.current_size = 0   # number of valid entries currently stored

        # Prioritized experience replay bookkeeping.
        if use_prioritized:
            self.priorities = np.zeros(capacity, dtype=np.float32)
            self.max_priority = 1.0  # priority assigned to fresh experiences

    def push(self, state, action, reward, next_state, done):
        """Add one raw transition and store any completed n-step returns.

        On a terminal step, every pending suffix of the n-step window is
        flushed so that the transitions leading into the terminal state
        are stored as well (previously they were silently discarded).
        """
        self.n_step_buffer.append((state, action, reward, next_state, done))

        if done:
            # Episode ended: emit an aggregated transition for every
            # remaining window suffix, then the window is empty.
            while self.n_step_buffer:
                self._store(self._get_n_step_info())
                self.n_step_buffer.popleft()
        elif len(self.n_step_buffer) == self.n_step:
            # Window full: store the n-step return for its oldest entry.
            # The deque's maxlen evicts that entry on the next append.
            self._store(self._get_n_step_info())

    def _store(self, experience):
        """Write one aggregated (s, a, R_n, s', done) tuple, overwriting old data."""
        self.buffer[self.position] = experience

        if self.use_prioritized:
            # New experiences get the current max priority so each one is
            # sampled at least once before its priority is refined.
            self.priorities[self.position] = self.max_priority

        # Advance the write pointer and grow the size up to capacity.
        self.position = (self.position + 1) % self.capacity
        if self.current_size < self.capacity:
            self.current_size += 1

    def _get_n_step_info(self):
        """Compute the n-step return info for the head of the window.

        Returns:
            (state, action, n_step_reward, next_state, done) where the
            state/action come from the oldest transition, the reward is
            the discounted sum over the window, and next_state/done come
            from the newest transition.
        """
        first = self.n_step_buffer[0]
        last = self.n_step_buffer[-1]

        # Discounted cumulative reward over the window.
        n_step_reward = sum(
            self.gamma ** i * transition[2]
            for i, transition in enumerate(self.n_step_buffer)
        )

        return first[0], first[1], n_step_reward, last[3], last[4]

    def sample(self, batch_size: int, beta: float = 0.4):
        """Sample a batch of aggregated transitions.

        Args:
            batch_size: Number of transitions requested.
            beta: Importance-sampling exponent (prioritized mode only).

        Returns:
            ((states, actions, rewards, next_states, dones), weights, indices)
            where ``weights`` and ``indices`` are None for uniform sampling.

        Raises:
            ValueError: If the buffer is empty.
        """
        if self.current_size == 0:
            # Explicit guard: previously this surfaced as a cryptic
            # unpacking error from zip(*[]).
            raise ValueError("cannot sample from an empty replay buffer")

        if self.use_prioritized:
            return self._sample_prioritized(batch_size, beta)
        return self._sample_uniform(batch_size)

    def _sample_prioritized(self, batch_size: int, beta: float):
        """Proportional prioritized sampling with importance weights."""
        # Fall back to uniform sampling until enough data has accumulated.
        if self.current_size < batch_size:
            return self._sample_uniform(batch_size)

        # Priorities of the valid (filled) portion of the buffer.
        if self.current_size < self.capacity:
            valid_priorities = self.priorities[:self.current_size]
        else:
            valid_priorities = self.priorities

        # Sampling probabilities: p_i^alpha / sum_j p_j^alpha.
        probs = valid_priorities ** self.alpha
        probs /= probs.sum()

        indices = np.random.choice(
            self.current_size,
            size=batch_size,
            replace=False,
            p=probs,
        )

        batch = [self.buffer[i] for i in indices]
        states, actions, rewards, next_states, dones = zip(*batch)

        # Importance-sampling weights, normalized by the max for stability.
        weights = (self.current_size * probs[indices]) ** (-beta)
        weights = weights / weights.max()

        return (
            (np.array(states),
             np.array(actions),
             np.array(rewards),
             np.array(next_states),
             np.array(dones)),
            weights,
            indices,
        )

    def _sample_uniform(self, batch_size: int):
        """Uniform sampling; returns all data if fewer than batch_size stored."""
        if self.current_size < batch_size:
            indices = np.arange(self.current_size)
        else:
            indices = np.random.choice(self.current_size, batch_size, replace=False)

        batch = [self.buffer[i] for i in indices]
        states, actions, rewards, next_states, dones = zip(*batch)

        return (
            (np.array(states),
             np.array(actions),
             np.array(rewards),
             np.array(next_states),
             np.array(dones)),
            None,   # no importance weights in uniform mode
            None,   # no indices needed (priorities are not updated)
        )

    def update_priorities(self, indices, priorities):
        """Update priorities of previously sampled experiences.

        Args:
            indices: Buffer indices returned by a prioritized sample call.
            priorities: New priority values (e.g. |TD error|), same length.
        """
        # No-op when prioritized replay is disabled (self.priorities does
        # not exist in that configuration).
        if not self.use_prioritized:
            return

        # Clamp to a small positive floor so no experience becomes unsampleable.
        priorities = np.maximum(priorities, 1e-5)

        for idx, priority in zip(indices, priorities):
            # Guard against out-of-range indices.
            if idx < len(self.priorities):
                self.priorities[idx] = priority
                # Track the running maximum for new-experience initialization.
                if priority > self.max_priority:
                    self.max_priority = priority

    def __len__(self):
        """Number of aggregated transitions currently stored."""
        return self.current_size