import random
import numpy as np
from collections import deque
import torch

class ReplayBuffer:
    """Uniform experience replay buffer backed by a bounded deque."""

    def __init__(self, capacity=10000):
        # deque(maxlen=...) silently evicts the oldest transition once full.
        self.buffer = deque(maxlen=capacity)
        self.capacity = capacity

    def push(self, state, action, reward, next_state, done):
        """Store a single (state, action, reward, next_state, done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random batch and return it as stacked numpy arrays."""
        transitions = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*transitions)
        return tuple(np.array(field)
                     for field in (states, actions, rewards, next_states, dones))

    def __len__(self):
        return len(self.buffer)

class NStepReplayBuffer:
    """Experience replay buffer that stores n-step returns.

    Each stored transition is (s_t, a_t, R, s_{t+k}, done) where
    R = sum_{i=0}^{k-1} gamma^i * r_{t+i} and k <= n_step (windows get
    shorter near an episode boundary).

    Fix vs. the original: on `done`, the sliding window is flushed and
    cleared. Previously the last n_step-1 transitions of an episode were
    only emitted lazily once the *next* episode refilled the window, and
    the final episode's tail was lost entirely.
    """

    def __init__(self, capacity=10000, n_step=3, gamma=0.99):
        self.buffer = deque(maxlen=capacity)
        self.n_step = n_step
        self.gamma = gamma
        # Sliding window holding the most recent raw transitions.
        self.n_step_buffer = deque(maxlen=n_step)
        self.capacity = capacity

    def push(self, state, action, reward, next_state, done):
        """Add a raw transition; emits n-step transitions as windows complete.

        At episode termination, all remaining partial windows are stored and
        the window is reset so consecutive episodes are never mixed.
        """
        self.n_step_buffer.append((state, action, reward, next_state, done))

        if len(self.n_step_buffer) == self.n_step:
            self._store_oldest()

        if done:
            # The window starting at index 0 was not emitted above if the
            # episode ended before the window filled.
            if len(self.n_step_buffer) < self.n_step:
                self._store_oldest()
            # Flush the shorter windows starting at each later index, then
            # reset for the next episode.
            while len(self.n_step_buffer) > 1:
                self.n_step_buffer.popleft()
                self._store_oldest()
            self.n_step_buffer.clear()

    def _store_oldest(self):
        """Emit the n-step transition that starts at the window's oldest entry."""
        state_0, action_0 = self.n_step_buffer[0][:2]
        reward_n, next_state_n, done_n = self._get_n_step_info()
        self.buffer.append((state_0, action_0, reward_n, next_state_n, done_n))

    def _get_n_step_info(self):
        """Accumulate the discounted reward over the window, truncating at `done`."""
        reward_n = 0
        for i, (_, _, reward, next_state, done) in enumerate(self.n_step_buffer):
            reward_n += (self.gamma ** i) * reward
            if done:
                # Episode ended inside the window: bootstrap target is the
                # terminal next_state.
                return reward_n, next_state, done

        # No early termination: return the window's final state.
        _, _, _, next_state, done = self.n_step_buffer[-1]
        return reward_n, next_state, done

    def sample(self, batch_size):
        """Draw a uniform random batch of stored n-step transitions."""
        batch = random.sample(self.buffer, batch_size)
        state, action, reward, next_state, done = zip(*batch)

        return (np.array(state),
                np.array(action),
                np.array(reward),
                np.array(next_state),
                np.array(done))

    def __len__(self):
        return len(self.buffer)

class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (PER).

    Transitions are sampled with probability proportional to
    priority ** alpha; importance-sampling weights correct the bias.
    """

    def __init__(self, capacity=10000, alpha=0.6):
        self.capacity = capacity
        self.alpha = alpha
        self.buffer = []
        self.priorities = np.zeros((capacity,), dtype=np.float32)
        self.position = 0
        # New transitions get the running maximum priority so they are
        # guaranteed to be sampled at least once.
        self.max_priority = 1.0

    def push(self, state, action, reward, next_state, done):
        """Insert a transition at the write cursor with maximal priority."""
        entry = (state, action, reward, next_state, done)
        if len(self.buffer) == self.capacity:
            self.buffer[self.position] = entry
        else:
            self.buffer.append(entry)

        self.priorities[self.position] = self.max_priority
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size, beta=0.4):
        """Sample a batch proportionally to stored priorities.

        Returns (states, actions, rewards, next_states, dones,
        indices, importance_weights); `beta` scales the bias correction.
        """
        n = len(self.buffer)
        active = self.priorities if n == self.capacity else self.priorities[:self.position]

        # Small epsilon keeps every sampling probability strictly positive.
        probs = (active + 1e-6) ** self.alpha
        probs /= probs.sum()

        chosen = np.random.choice(n, batch_size, p=probs, replace=False)
        batch = [self.buffer[i] for i in chosen]

        # Importance-sampling weights, normalized so the largest is 1.
        weights = (n * probs[chosen]) ** (-beta)
        weights = np.array(weights / weights.max(), dtype=np.float32)

        states, actions, rewards, next_states, dones = zip(*batch)
        arrays = tuple(np.array(field)
                       for field in (states, actions, rewards, next_states, dones))
        return arrays + (chosen, weights)

    def update_priorities(self, indices, priorities):
        """Overwrite priorities for the given slots, clamped to [1e-6, 1000]."""
        for idx, p in zip(indices, priorities):
            p = min(max(p, 1e-6), 1000.0)  # positive and bounded
            self.priorities[idx] = p
            if p > self.max_priority:
                self.max_priority = p

    def __len__(self):
        return len(self.buffer)

class AdvancedPrioritizedReplayBuffer:
    """Prioritized replay buffer supporting multiple priority strategies.

    Besides plain TD-error priorities ('td_error', the default), priorities
    can be modulated by how surprising a transition's reward is ('surprise')
    or to balance sampling across actions ('action_balance'). Sampling is
    stratified across the priority array to improve batch diversity.

    Fix vs. the original: `_stratified_sample` renormalized a *view* of the
    probability array in place, corrupting the global `probs` that `sample`
    later uses for importance weights. Slices are now copied before
    renormalization, and the stratum count never exceeds the population size.
    """

    def __init__(self, capacity=10000, alpha=0.6, priority_strategy='td_error'):
        self.capacity = capacity
        self.alpha = alpha
        self.priority_strategy = priority_strategy  # 'td_error' | 'surprise' | 'action_balance'
        self.buffer = []
        self.priorities = np.zeros((capacity,), dtype=np.float32)
        self.position = 0
        self.max_priority = 1.0

        # Extra statistics used by the alternative priority strategies.
        self.reward_buffer = np.zeros(capacity, dtype=np.float32)  # for 'surprise'
        self.action_counts = {}  # action -> visit count, for 'action_balance'

    def push(self, state, action, reward, next_state, done):
        """Insert a transition with maximal priority and update statistics."""
        if len(self.buffer) < self.capacity:
            self.buffer.append((state, action, reward, next_state, done))
        else:
            self.buffer[self.position] = (state, action, reward, next_state, done)

        # Track rewards so the 'surprise' strategy can score outliers.
        self.reward_buffer[self.position] = reward

        # Track action frequencies for the 'action_balance' strategy.
        self.action_counts[action] = self.action_counts.get(action, 0) + 1

        self.priorities[self.position] = self.max_priority
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size, beta=0.4):
        """Stratified priority sampling.

        Returns (states, actions, rewards, next_states, dones, indices,
        importance_weights). `beta` controls the strength of the
        importance-sampling correction.
        """
        if len(self.buffer) == self.capacity:
            priorities = self.priorities.copy()
        else:
            priorities = self.priorities[:self.position].copy()

        # Strategy-specific priority shaping (operates on the copy).
        if self.priority_strategy == 'surprise':
            priorities = self._apply_surprise_bonus(priorities)
        elif self.priority_strategy == 'action_balance':
            priorities = self._apply_action_balance(priorities)

        priorities = priorities + 1e-6  # keep probabilities strictly positive
        probs = priorities ** self.alpha
        probs /= probs.sum()

        # Stratified sampling for batch diversity.
        indices = self._stratified_sample(probs, batch_size)
        samples = [self.buffer[idx] for idx in indices]

        # Importance-sampling weights from the *global* probabilities,
        # normalized so the largest weight is 1.
        total = len(self.buffer)
        weights = (total * probs[indices]) ** (-beta)
        weights /= weights.max()
        weights = np.array(weights, dtype=np.float32)

        state, action, reward, next_state, done = zip(*samples)

        return (np.array(state),
                np.array(action),
                np.array(reward),
                np.array(next_state),
                np.array(done),
                indices,
                weights)

    def _apply_surprise_bonus(self, priorities):
        """Boost priorities of transitions whose reward deviates from the mean.

        No-op until the buffer holds at least 100 transitions, so the reward
        statistics are meaningful. `priorities` always has one entry per
        stored transition, so `reward_buffer[:len(priorities)]` covers
        exactly the occupied slots.
        """
        if len(self.buffer) < 100:
            return priorities

        rewards = self.reward_buffer[:len(priorities)]
        reward_mean = np.mean(rewards)
        reward_std = np.std(rewards) + 1e-6  # epsilon avoids divide-by-zero

        # z-score of each reward; 10% bonus per standard deviation.
        surprise = np.abs(rewards - reward_mean) / reward_std
        return priorities * (1.0 + 0.1 * surprise)

    def _apply_action_balance(self, priorities):
        """Boost priorities of transitions whose action is under-sampled."""
        if not self.action_counts:
            return priorities

        total_samples = sum(self.action_counts.values())

        for i in range(len(priorities)):
            if i < len(self.buffer):
                _, action, _, _, _ = self.buffer[i]
                action_freq = self.action_counts.get(action, 1)
                # Ratio of the uniform frequency to the observed frequency,
                # capped at 2x so no single action dominates sampling.
                balance_factor = total_samples / (action_freq * len(self.action_counts))
                priorities[i] *= min(balance_factor, 2.0)

        return priorities

    def _stratified_sample(self, probs, batch_size):
        """Sample `batch_size` indices, spread across up to 10 strata.

        Must not modify `probs`: the caller reuses it to compute the
        importance-sampling weights.
        """
        indices = []
        # Never create more strata than there are elements to draw from
        # (avoids zero-sized strata and empty np.random.choice populations).
        n_strata = min(batch_size, 10, len(probs))
        stratum_size = len(probs) // n_strata

        for i in range(n_strata):
            start = i * stratum_size
            end = (i + 1) * stratum_size if i < n_strata - 1 else len(probs)
            # copy(): renormalizing a view in place would corrupt `probs`.
            stratum_probs = probs[start:end].copy()
            stratum_probs /= stratum_probs.sum()

            n_samples = batch_size // n_strata
            if i < batch_size % n_strata:
                n_samples += 1

            stratum_indices = np.random.choice(
                range(start, end),
                size=min(n_samples, end - start),
                p=stratum_probs,
                replace=False
            )
            indices.extend(stratum_indices)

        # Top up with a global draw if the strata could not supply enough.
        if len(indices) < batch_size:
            remaining = batch_size - len(indices)
            available = list(set(range(len(probs))) - set(indices))
            if available:
                remaining_probs = probs[available]  # fancy indexing copies
                remaining_probs /= remaining_probs.sum()
                additional = np.random.choice(
                    available,
                    size=min(remaining, len(available)),
                    p=remaining_probs,
                    replace=False
                )
                indices.extend(additional)

        return np.array(indices[:batch_size])

    def update_priorities(self, indices, priorities):
        """Overwrite priorities for the given slots, clamped to [1e-6, 1000]."""
        for idx, priority in zip(indices, priorities):
            priority = np.clip(priority, 1e-6, 1000.0)
            self.priorities[idx] = priority
            self.max_priority = max(self.max_priority, priority)

    def get_stats(self):
        """Return summary statistics of the buffer, or {} when empty."""
        if len(self.buffer) == 0:
            return {}

        priorities = self.priorities[:len(self.buffer)]
        return {
            'buffer_size': len(self.buffer),
            'avg_priority': np.mean(priorities),
            'max_priority': np.max(priorities),
            'min_priority': np.min(priorities),
            'priority_std': np.std(priorities),
            'action_diversity': len(self.action_counts),
            'most_common_actions': sorted(self.action_counts.items(),
                                          key=lambda x: x[1], reverse=True)[:5]
        }

    def __len__(self):
        return len(self.buffer)