from collections import deque
import random
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torch.nn.functional as F
from torch.distributions import Normal


class PPOBuffer:
    """Columnar experience buffer for on-policy (PPO) rollouts.

    Transitions are stored as parallel deques (one per field) so the oldest
    entries are evicted together once ``buffer_size`` is exceeded.
    """

    def __init__(self, buffer_size=5000):
        """Create an empty buffer holding at most ``buffer_size`` transitions."""
        self.buffer_size = buffer_size
        self.buffer = {
            "states": deque(maxlen=buffer_size),
            "actions": deque(maxlen=buffer_size),
            "log_probs": deque(maxlen=buffer_size),
            "rewards": deque(maxlen=buffer_size),
            "next_states": deque(maxlen=buffer_size),
            "dones": deque(maxlen=buffer_size),
        }
        self.indices = []  # kept for interface compatibility; unused here

    def store(self, transition):
        """Append one transition (a dict keyed by the field names above).

        Raises:
            KeyError: if ``transition`` contains an unknown key.  Keys are
                validated *before* any write so the per-field deques can
                never become misaligned by a partially applied transition.
        """
        for key in transition:
            if key not in self.buffer:
                raise KeyError(f"Invalid key {key} in transition data.")
        for key, value in transition.items():
            self.buffer[key].append(value)

    def sample(self, batch_size):
        """Return a random batch as a dict of aligned lists.

        The same row indices are used for every field, so the i-th element of
        each returned list belongs to the same transition.  ``batch_size`` is
        clamped to the number of stored transitions.
        """
        n = len(self)
        batch_size = min(batch_size, n)
        idx = random.sample(range(n), batch_size)
        return {key: [dq[i] for i in idx] for key, dq in self.buffer.items()}

    def clear(self):
        """Drop all stored data (PPO is on-policy: stale data must be purged)."""
        for dq in self.buffer.values():
            dq.clear()

    def __len__(self):
        # Number of stored transitions (all field deques share one length).
        return len(self.buffer["states"])


class PPO_Trainer:
    """PPO trainer that collects transitions in plain lists and performs a
    clipped-surrogate mini-batch update.

    Expected model interfaces (inferred from usage — confirm against the
    actual networks):
      * ``actor(states)`` returns a 3-tuple whose last element is the
        per-sample log-probabilities, and exposes ``actor.policy_net`` whose
        output columns are ``(mean, log_std)`` for the entropy bonus.
      * ``critic(states)`` returns state values of shape ``(batch, 1)``.
    """

    def __init__(
        self,
        actor: nn.Module,
        critic: nn.Module,
        device,
        lr_actor=3e-4,
        lr_critic=1e-3,
        gamma=0.99,
        gae_lambda=0.95,
        clip_epsilon=0.2,
        k_epochs=4,
        mini_batch_size=64,
    ):
        # Policy and value networks.
        self.actor = actor
        self.critic = critic
        self.device = device

        # Separate optimizers so actor/critic can use different learning rates.
        self.optimizer_actor = optim.Adam(actor.parameters(), lr=lr_actor)
        self.optimizer_critic = optim.Adam(critic.parameters(), lr=lr_critic)

        # Hyperparameters.
        self.gamma = gamma
        self.gae_lambda = gae_lambda  # GAE decay factor
        self.clip_epsilon = clip_epsilon  # PPO clipping ratio
        self.k_epochs = k_epochs
        self.mini_batch_size = mini_batch_size

        # Rollout storage; cleared after every update (PPO is on-policy).
        self.states = []
        self.actions = []
        self.log_probs = []
        self.rewards = []
        self.next_states = []
        self.dones = []

    def store_transition(self, state, action, log_prob, reward, next_state, done):
        """Store a single environment transition."""
        self.states.append(state)
        self.actions.append(action)
        self.log_probs.append(log_prob)
        self.rewards.append(reward)
        self.next_states.append(next_state)
        self.dones.append(done)

    def clear_buffer(self):
        """Drop all stored transitions."""
        self.states = []
        self.actions = []
        self.log_probs = []
        self.rewards = []
        self.next_states = []
        self.dones = []

    def compute_gae(self, values, next_values, rewards, dones):
        """Compute Generalized Advantage Estimation (GAE).

        Args:
            values: state values V(s_t), shape (T,).
            next_values: bootstrap values V(s_{t+1}), shape (T,).
            rewards: rewards r_t, shape (T,).
            dones: episode-termination flags as floats, shape (T,).

        Returns:
            Normalized advantages, shape (T,).
        """
        advantages = torch.zeros_like(rewards)
        gae = 0.0
        for t in reversed(range(len(rewards))):
            # TD error; (1 - done) zeroes the bootstrap across episode ends.
            delta = rewards[t] + self.gamma * next_values[t] * (1 - dones[t]) - values[t]
            # Exponentially weighted sum of TD errors.
            gae = delta + self.gamma * self.gae_lambda * (1 - dones[t]) * gae
            advantages[t] = gae

        # Normalize advantages.  Guard the single-sample case: std() of one
        # element is NaN and would poison the whole update.
        if advantages.numel() > 1:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        return advantages

    def update(self):
        """Run one PPO update over the stored rollout, then clear the buffer."""
        # Convert rollout data to tensors on the target device.
        states = torch.stack([torch.tensor(s, dtype=torch.float32, device=self.device) for s in self.states])
        old_log_probs = torch.tensor(self.log_probs, dtype=torch.float32, device=self.device)
        rewards = torch.tensor(self.rewards, dtype=torch.float32, device=self.device)
        next_states = torch.stack([torch.tensor(s, dtype=torch.float32, device=self.device) for s in self.next_states])
        dones = torch.tensor(self.dones, dtype=torch.float32, device=self.device)

        # State values (no grad: used only as GAE / return targets).
        # squeeze(-1) rather than squeeze(): a bare squeeze() collapses a
        # size-1 batch to a 0-d tensor and compute_gae's indexing then fails.
        with torch.no_grad():
            values = self.critic(states).squeeze(-1)
            next_values = self.critic(next_states).squeeze(-1)

        # GAE advantages and bootstrapped returns.
        advantages = self.compute_gae(values, next_values, rewards, dones)
        returns = advantages + values

        dataset_size = len(states)
        indices = np.arange(dataset_size)

        # K optimization epochs over shuffled mini-batches.
        for _ in range(self.k_epochs):
            np.random.shuffle(indices)

            for start in range(0, dataset_size, self.mini_batch_size):
                end = start + self.mini_batch_size
                mini_indices = indices[start:end]

                batch_states = states[mini_indices]
                batch_old_log_probs = old_log_probs[mini_indices]
                batch_advantages = advantages[mini_indices]
                batch_returns = returns[mini_indices]

                # Re-evaluate with the current policy.
                # NOTE(review): the stored actions are never passed to the
                # actor here — confirm the actor re-computes log-probs of the
                # *old* actions, otherwise the PPO ratio is not well defined.
                _, _, batch_log_probs = self.actor(batch_states)

                # Probability ratio pi_new / pi_old.
                ratios = torch.exp(batch_log_probs - batch_old_log_probs)

                # Clipped surrogate objective.
                surr1 = ratios * batch_advantages
                surr2 = torch.clamp(ratios, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * batch_advantages
                actor_loss = -torch.min(surr1, surr2).mean()

                # Value-function loss (squeeze(-1) keeps a size-1 batch 1-D).
                batch_values = self.critic(batch_states).squeeze(-1)
                critic_loss = F.mse_loss(batch_values, batch_returns)

                # Entropy regularization from the Gaussian policy head.
                dist_params = self.actor.policy_net(batch_states)
                dist = Normal(dist_params[:, 0], torch.exp(dist_params[:, 1]) + 1e-6)
                entropy_bonus = dist.entropy().mean()

                # Combined loss: surrogate + value + entropy bonus.
                total_loss = actor_loss + 0.5 * critic_loss - 0.01 * entropy_bonus

                # Optimization step with gradient clipping.
                self.optimizer_actor.zero_grad()
                self.optimizer_critic.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.optimizer_actor.step()
                self.optimizer_critic.step()

        # On-policy: discard the rollout after use.
        print(f"[PPO_Trainer] Updated PPO with {len(self.states)} transitions.")
        self.clear_buffer()

    @property
    def params(self):
        """Current hyperparameter snapshot (for logging/checkpoints)."""
        return {
            "lr_actor": self.optimizer_actor.param_groups[0]["lr"],
            "lr_critic": self.optimizer_critic.param_groups[0]["lr"],
            "gamma": self.gamma,
            "gae_lambda": self.gae_lambda,
            "clip_epsilon": self.clip_epsilon,
            "k_epochs": self.k_epochs,
            "mini_batch_size": self.mini_batch_size,
        }


class PPO_Trainer_Batched:
    """PPO trainer that accumulates several complete episodes (one
    ``PPOBuffer`` per episode) and performs a clipped-surrogate update only
    once ``n_episodes_per_update`` episodes have been collected.

    Expected model interfaces (inferred from usage — confirm against the
    actual networks): same as ``PPO_Trainer`` (actor returns a 3-tuple ending
    in log-probs and exposes ``policy_net``; critic returns ``(batch, 1)``).
    """

    def __init__(
        self,
        actor: nn.Module,
        critic: nn.Module,
        device,
        lr_actor=1e-4,
        lr_critic=3e-4,
        gamma=0.995,
        gae_lambda=0.97,
        clip_epsilon=0.15,
        k_epochs=3,
        mini_batch_size=64,
        buffer_size=10000,
        segment_length=100,
        entropy_coef=0.01,
        n_episodes_per_update=4,
    ):
        # Policy and value networks.
        self.actor = actor
        self.critic = critic
        self.device = device

        # Separate optimizers for actor and critic.
        self.optimizer_actor = optim.Adam(actor.parameters(), lr=lr_actor)
        self.optimizer_critic = optim.Adam(critic.parameters(), lr=lr_critic)

        # Hyperparameters.
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.clip_epsilon = clip_epsilon
        self.k_epochs = k_epochs
        self.mini_batch_size = mini_batch_size
        self.segment_length = segment_length  # trajectory segment length
        self.entropy_coef = entropy_coef
        self.n_episodes_per_update = n_episodes_per_update  # episodes per update

        # One replay buffer per episode.  BUGFIX: ``buffer_size`` was
        # previously ignored — PPOBuffer() was built with its default
        # capacity, so the configured value (and ``params``) were wrong.
        self.buffer = [PPOBuffer(buffer_size) for _ in range(self.n_episodes_per_update)]
        self.episode_cnt = 0  # number of finished episodes

    def store_transition(self, state, action, log_prob, reward, next_state, done):
        """Store one transition into the buffer of the current episode."""
        transition = {
            "states": state,
            "actions": action,
            "log_probs": log_prob,
            "rewards": reward,
            "next_states": next_state,
            "dones": done,
        }
        self.buffer[self.episode_cnt % self.n_episodes_per_update].store(transition)

    def compute_gae_episodic(self):
        """Compute GAE advantages and returns, episode by episode.

        Returns:
            A pair of 1-D float tensors ``(advantages, returns)`` concatenated
            over all non-empty episode buffers, in buffer order (the same
            order ``update`` flattens the states in).
        """
        all_advantages = []
        all_returns = []

        for episode in self.buffer:
            data = episode.buffer
            if len(data["rewards"]) == 0:
                continue  # robustness: skip an episode with no data
            rewards = torch.tensor(list(data["rewards"]), dtype=torch.float32, device=self.device)
            dones = torch.tensor(list(data["dones"]), dtype=torch.float32, device=self.device)
            states_tensor = torch.stack(
                [torch.as_tensor(s, dtype=torch.float32, device=self.device) for s in data["states"]]
            )

            with torch.no_grad():
                # squeeze(-1), not squeeze(): keeps a 1-step episode shaped
                # (1,) so no special-casing of 0-d tensors is needed.
                values = self.critic(states_tensor).squeeze(-1)
                # Bootstrap with the next step's value; 0 after the last step
                # (combined with (1 - done) below for true terminals).
                next_values = torch.cat([values[1:], values.new_zeros(1)])

            advantages = torch.zeros_like(rewards)
            gae = 0.0
            # Backward recursion over the episode.
            for t in reversed(range(len(rewards))):
                delta = rewards[t] + self.gamma * next_values[t] * (1 - dones[t]) - values[t]
                gae = delta + self.gamma * self.gae_lambda * (1 - dones[t]) * gae
                advantages[t] = gae

            # Per-episode normalization; guard 1-step episodes (std is NaN).
            if advantages.numel() > 1:
                advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

            # NOTE(review): returns are built from the *normalized*
            # advantages, matching the original implementation — confirm this
            # is the intended value target.
            returns = advantages + values

            all_advantages.append(advantages)
            all_returns.append(returns)

        if not all_advantages:
            empty = torch.zeros(0, dtype=torch.float32, device=self.device)
            return empty, empty.clone()
        return torch.cat(all_advantages), torch.cat(all_returns)

    def update(self):
        """Run one PPO update if enough episodes have been collected.

        Returns:
            ``(False, None)`` while still collecting episodes, otherwise
            ``(True, (mean_actor_loss, mean_critic_loss, mean_entropy))``.
        """
        self.episode_cnt += 1
        if self.episode_cnt % self.n_episodes_per_update != 0:
            print(f"[PPO_Trainer_Batched] 还未收集到足够的回合数据进行更新, 当前回合数: {self.episode_cnt}")
            return False, None

        # GAE advantages and returns over all stored episodes.
        advantages, returns = self.compute_gae_episodic()

        # Flatten all episodes into one dataset, in the same buffer order
        # compute_gae_episodic used, so rows stay aligned.
        states = []
        actions = []
        old_log_probs = []
        for episode in self.buffer:
            data = episode.buffer
            states.extend(data["states"])
            actions.extend(data["actions"])
            old_log_probs.extend(data["log_probs"])

        states_tensor = torch.tensor(np.array(states), dtype=torch.float32, device=self.device)
        # NOTE(review): actions are materialized but never fed to the actor —
        # confirm the actor re-computes log-probs of the *old* actions.
        actions_tensor = torch.tensor(actions, dtype=torch.float32, device=self.device)
        old_log_probs_tensor = torch.tensor(old_log_probs, dtype=torch.float32, device=self.device)

        # Accumulate plain floats, not graph-holding tensors (the original
        # kept every batch's autograd graph alive until the end of update()).
        mean_actor_loss = 0.0
        mean_critic_loss = 0.0
        mean_entropy_bonus = 0.0
        batch_num = 0
        for _ in range(self.k_epochs):
            # Fresh shuffle each epoch.
            indices = torch.randperm(len(states_tensor))

            for start in range(0, len(states_tensor), self.mini_batch_size):
                end = min(start + self.mini_batch_size, len(states_tensor))
                batch_indices = indices[start:end]

                batch_states = states_tensor[batch_indices]
                batch_old_log_probs = old_log_probs_tensor[batch_indices]
                batch_advantages = advantages[batch_indices]
                batch_returns = returns[batch_indices]

                # Re-evaluate with the current policy.
                _, _, batch_log_probs = self.actor(batch_states)

                # Probability ratio pi_new / pi_old.
                ratios = torch.exp(batch_log_probs - batch_old_log_probs)

                # Clipped surrogate objective.
                surr1 = ratios * batch_advantages
                surr2 = torch.clamp(ratios, 1 - self.clip_epsilon,
                                    1 + self.clip_epsilon) * batch_advantages
                actor_loss = -torch.min(surr1, surr2).mean()

                # Value-function loss (squeeze(-1) keeps a size-1 batch 1-D).
                batch_values = self.critic(batch_states).squeeze(-1)
                critic_loss = F.mse_loss(batch_values, batch_returns)

                # Entropy regularization from the Gaussian policy head.
                dist_params = self.actor.policy_net(batch_states)
                dist = Normal(dist_params[:, 0], torch.exp(dist_params[:, 1]) + 1e-6)
                entropy_bonus = dist.entropy().mean()

                # Entropy weight decays over training (time constant: 200 episodes).
                total_loss = actor_loss + 0.5 * critic_loss
                total_loss = total_loss - self.entropy_coef * entropy_bonus * np.exp(-self.episode_cnt / 200)

                mean_actor_loss += actor_loss.item()
                mean_critic_loss += critic_loss.item()
                mean_entropy_bonus += entropy_bonus.item()
                batch_num += 1

                # Optimization step with gradient clipping.
                self.optimizer_actor.zero_grad()
                self.optimizer_critic.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.optimizer_actor.step()
                self.optimizer_critic.step()

        if batch_num == 0:
            # Nothing to train on (all episode buffers empty): avoid the
            # division below, discard buffers, report no update.
            for episode in self.buffer:
                episode.clear()
            return False, None

        mean_actor_loss /= batch_num
        mean_critic_loss /= batch_num
        mean_entropy_bonus /= batch_num

        # Report update statistics, then discard the on-policy data.
        total_steps = sum(len(ep.buffer['rewards']) for ep in self.buffer)
        print(f"策略更新完成! 使用 {len(self.buffer)} 回合, {total_steps} 步数据")
        for i, episode in enumerate(self.buffer):
            print(f"回合 {i + 1} 数据量: {len(episode.buffer['rewards'])}")
            episode.clear()
        return True, (mean_actor_loss, mean_critic_loss, mean_entropy_bonus)

    @property
    def params(self):
        """Current hyperparameter snapshot (for logging/checkpoints)."""
        return {
            "lr_actor": self.optimizer_actor.param_groups[0]["lr"],
            "lr_critic": self.optimizer_critic.param_groups[0]["lr"],
            "gamma": self.gamma,
            "gae_lambda": self.gae_lambda,
            "clip_epsilon": self.clip_epsilon,
            "k_epochs": self.k_epochs,
            "mini_batch_size": self.mini_batch_size,
            "buffer_size": self.buffer[0].buffer_size,
            "segment_length": self.segment_length,
            "entropy_coef": self.entropy_coef,
            "episode_cnt": self.episode_cnt,
            "n_episodes_per_update": self.n_episodes_per_update,
        }
