import numpy as np
import random
from collections import deque
from typing import List, Tuple, Dict, Any


class ReplayBuffer:
    """
    Uniform experience replay buffer for DQN training.

    Stores transitions in a fixed-size FIFO queue; once full, the oldest
    transitions are discarded automatically.
    """

    def __init__(self, capacity: int):
        """
        Create an empty buffer.

        Args:
            capacity: Maximum number of transitions to retain.
        """
        self.capacity = capacity
        # deque with maxlen evicts the oldest entry automatically once full
        self.buffer = deque(maxlen=capacity)

    def push(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, done: bool):
        """
        Store one transition.

        Args:
            state: Observation before the action.
            action: Action taken.
            reward: Reward received.
            next_state: Observation after the action.
            done: Whether the episode terminated.
        """
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size: int) -> List[Tuple]:
        """
        Draw a uniform random batch without replacement.

        If fewer transitions are stored than requested, the whole buffer
        is sampled instead.

        Args:
            batch_size: Number of transitions requested.

        Returns:
            List[Tuple]: Sampled (state, action, reward, next_state, done) tuples.
        """
        count = min(batch_size, len(self.buffer))
        return random.sample(self.buffer, count)

    def __len__(self) -> int:
        """
        Returns:
            int: Number of transitions currently stored.
        """
        return len(self.buffer)


class PrioritizedReplayBuffer:
    """
    Prioritized experience replay buffer (Schaul et al., 2016).

    Transitions with larger TD error are sampled more often; importance
    sampling weights correct the resulting bias.
    """
    def __init__(self, capacity: int, alpha: float = 0.6, beta: float = 0.4,
                 beta_increment: float = 0.001, eps: float = 1e-6):
        """
        Initialize the prioritized replay buffer.

        Args:
            capacity: Maximum number of transitions to retain.
            alpha: Priority exponent; controls how strongly priorities skew sampling
                (0 = uniform, 1 = fully proportional).
            beta: Importance-sampling exponent used to correct the sampling bias.
            beta_increment: Amount beta grows per sample() call, annealing toward 1.
            eps: Floor added to priorities so a zero TD error never makes a
                transition unsampleable (and avoids an all-zero probability vector).
        """
        self.capacity = capacity
        self.buffer = []          # ring buffer of transitions
        self.position = 0         # next write index
        self.priorities = np.zeros((capacity,), dtype=np.float32)
        self.alpha = alpha
        self.beta = beta
        self.beta_increment = beta_increment
        self.eps = eps
        self.max_priority = 1.0   # new transitions get the max priority seen so far

    def push(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, done: bool):
        """
        Store one transition, overwriting the oldest entry when full.

        Args:
            state: Observation before the action.
            action: Action taken.
            reward: Reward received.
            next_state: Observation after the action.
            done: Whether the episode terminated.
        """
        experience = (state, action, reward, next_state, done)

        if len(self.buffer) < self.capacity:
            self.buffer.append(experience)
        else:
            self.buffer[self.position] = experience

        # New experiences get max priority so they are sampled at least once.
        self.priorities[self.position] = self.max_priority
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size: int) -> Tuple[List[Tuple], List[int], np.ndarray]:
        """
        Sample a batch with probability proportional to priority**alpha.

        Also anneals beta toward 1 by `beta_increment` per call.

        Args:
            batch_size: Number of transitions to draw (with replacement).

        Returns:
            Tuple[List[Tuple], List[int], np.ndarray]:
                (sampled transitions, their buffer indices, normalized
                importance-sampling weights with max weight == 1).
        """
        size = len(self.buffer)
        # Slice works whether the buffer is partially or fully filled.
        # Cast to float64 so the normalized probabilities pass
        # np.random.choice's sum-to-1 tolerance check.
        priorities = self.priorities[:size].astype(np.float64)

        probs = priorities ** self.alpha
        probs = probs / np.sum(probs)

        indices = np.random.choice(size, batch_size, p=probs)

        # Importance-sampling weights, normalized so the largest is 1.
        weights = (size * probs[indices]) ** (-self.beta)
        weights = weights / np.max(weights)

        # Anneal beta toward 1 (full bias correction).
        self.beta = min(1.0, self.beta + self.beta_increment)

        samples = [self.buffer[idx] for idx in indices]

        return samples, indices, weights

    def update_priorities(self, indices: List[int], priorities: np.ndarray):
        """
        Update priorities after a learning step (typically |TD error|).

        Args:
            indices: Buffer indices of the transitions that were trained on.
            priorities: New priority values, one per index.
        """
        for idx, priority in zip(indices, priorities):
            # Floor at eps: a zero TD error must not make the transition
            # unsampleable, and an all-zero priority vector would produce
            # NaN probabilities in sample().
            p = max(float(priority), self.eps)
            self.priorities[idx] = p
            self.max_priority = max(self.max_priority, p)

    def __len__(self) -> int:
        """
        Returns:
            int: Number of transitions currently stored.
        """
        return len(self.buffer)