import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from collections import deque
import random


# Shared MLP backbone (used by both the Actor and the Critic heads).
class MLPNetwork(nn.Module):
    """Two-hidden-layer fully connected network with ReLU activations.

    Args:
        input_dim: size of the input feature vector.
        output_dim: size of the output vector.
        hidden_dim: width of both hidden layers (default 256).
        output_activation: optional callable applied to the final output
            (e.g. ``torch.tanh``); ``None`` leaves the output unsquashed.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=256, output_activation=None):
        super(MLPNetwork, self).__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
        ]
        self.net = nn.Sequential(*layers)
        self.output_activation = output_activation

    def forward(self, x):
        """Run the MLP; apply the optional output activation when one was given."""
        out = self.net(x)
        return self.output_activation(out) if self.output_activation else out


# Actor network: squashed-Gaussian policy for SAC.
class Actor(nn.Module):
    """Gaussian policy whose samples are squashed by tanh and scaled to the
    action range.

    ``forward`` returns the (mean, std) of the pre-squash Gaussian;
    ``sample`` returns a reparameterized action and its log-probability.
    """

    def __init__(self, state_dim, action_dim, max_action, hidden_dim=256):
        super(Actor, self).__init__()
        self.log_std_min = -20  # clamp: keep std from collapsing to ~0
        self.log_std_max = 2  # clamp: keep std from exploding
        self.max_action = max_action

        self.mean_net = MLPNetwork(state_dim, action_dim, hidden_dim)
        self.log_std_net = MLPNetwork(state_dim, action_dim, hidden_dim)

    def forward(self, state):
        """Return (mean, std) of the pre-tanh Gaussian for ``state``."""
        mean = self.mean_net(state)
        log_std = self.log_std_net(state)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        std = torch.exp(log_std)  # parameterize in log-space, exponentiate to std
        return mean, std

    def sample(self, state):
        """Sample an action via the reparameterization trick.

        Returns:
            action: tanh-squashed sample scaled to [-max_action, max_action].
            log_prob: per-sample log-probability, shape (batch,), including the
                tanh change-of-variables correction.
        """
        mean, std = self.forward(state)
        dist = Normal(mean, std)
        pre_tanh = dist.rsample()  # reparameterized sample (keeps gradients)
        squashed = torch.tanh(pre_tanh)  # in (-1, 1)

        log_prob = dist.log_prob(pre_tanh).sum(dim=-1)
        # Tanh change-of-variables correction must use the *squashed* value in
        # (-1, 1), not the max_action-scaled action. The original code used the
        # scaled action, so for max_action > 1 (Pendulum: 2.0) the term
        # 1 - action^2 went negative and log() produced NaNs.
        log_prob -= torch.log(1 - squashed.pow(2) + 1e-6).sum(dim=-1)  # 1e-6 avoids log(0)

        action = squashed * self.max_action
        return action, log_prob


# Critic network: twin Q-functions (clipped double-Q trick).
class Critic(nn.Module):
    """Two independent Q-networks over (state, action) pairs.

    SAC takes the element-wise minimum of the two Q estimates to reduce
    overestimation bias; both heads share the same architecture.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super(Critic, self).__init__()
        self.q1_net = MLPNetwork(state_dim + action_dim, 1, hidden_dim)
        self.q2_net = MLPNetwork(state_dim + action_dim, 1, hidden_dim)

    def forward(self, state, action):
        """Return (q1, q2) estimates, each of shape (batch, 1)."""
        pair = torch.cat([state, action], dim=-1)  # concatenate along feature axis
        return self.q1_net(pair), self.q2_net(pair)


# SAC algorithm: actor, twin critics, target critics, and a replay buffer.
class SACAgent:
    """Soft Actor-Critic agent with a fixed temperature.

    Args:
        state_dim: dimensionality of observations.
        action_dim: dimensionality of (continuous) actions.
        max_action: absolute bound of each action component.
        gamma: discount factor.
        tau: Polyak coefficient for target-network soft updates.
        alpha: entropy temperature (fixed, not learned).
    """

    def __init__(self, state_dim, action_dim, max_action, gamma=0.99, tau=0.005, alpha=0.2):
        # Fall back to CPU when CUDA is unavailable; the original hard-coded
        # .cuda() and crashed on CPU-only machines.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.actor = Actor(state_dim, action_dim, max_action).to(self.device)
        self.critic = Critic(state_dim, action_dim).to(self.device)
        self.critic_target = Critic(state_dim, action_dim).to(self.device)
        self.critic_target.load_state_dict(self.critic.state_dict())  # start in sync

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=3e-4)

        self.replay_buffer = deque(maxlen=100000)  # FIFO experience replay
        self.max_action = max_action
        self.gamma = gamma  # discount factor
        self.tau = tau  # soft-update coefficient
        self.alpha = alpha  # entropy temperature

    def store_transition(self, transition):
        """Append one (state, action, reward, next_state, done) tuple."""
        self.replay_buffer.append(transition)

    def soft_update(self):
        """Polyak-average online critic parameters into the target critic."""
        for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def train(self, batch_size=256):
        """One gradient step on the critics and actor from a replay minibatch."""
        if len(self.replay_buffer) < batch_size:
            return  # not enough experience yet

        batch = random.sample(self.replay_buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states)).to(self.device)
        actions = torch.FloatTensor(np.array(actions)).to(self.device)
        rewards = torch.FloatTensor(np.array(rewards)).to(self.device).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states)).to(self.device)
        dones = torch.FloatTensor(np.array(dones)).to(self.device).unsqueeze(1)

        # Target Q: soft Bellman backup with the entropy bonus.
        with torch.no_grad():
            next_actions, next_log_probs = self.actor.sample(next_states)
            # actor.sample returns log-probs of shape (batch,); reshape to
            # (batch, 1) so arithmetic with the (batch, 1) Q-values does not
            # broadcast to (batch, batch). The original code skipped this and
            # silently produced a (batch, batch) target, corrupting both losses.
            next_log_probs = next_log_probs.unsqueeze(1)
            q1_next, q2_next = self.critic_target(next_states, next_actions)
            q_target = rewards + (1 - dones) * self.gamma * (
                torch.min(q1_next, q2_next) - self.alpha * next_log_probs
            )

        # Critic update: MSE of both heads against the shared target.
        q1, q2 = self.critic(states, actions)
        critic_loss = nn.MSELoss()(q1, q_target) + nn.MSELoss()(q2, q_target)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Actor update: maximize min-Q minus scaled entropy (minimize negation).
        new_actions, log_probs = self.actor.sample(states)
        log_probs = log_probs.unsqueeze(1)  # (batch, 1), matching the Q shapes
        q1_new, q2_new = self.critic(states, new_actions)
        actor_loss = (self.alpha * log_probs - torch.min(q1_new, q2_new)).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Track the online critics with the targets.
        self.soft_update()

    def select_action(self, state, deterministic=False):
        """Return a NumPy action for ``state``.

        deterministic=True uses the tanh-squashed mean (evaluation);
        otherwise an exploratory sample is drawn from the policy.
        """
        if isinstance(state, tuple):
            state = state[0]  # unpack Gym's (obs, info) reset return
        state = np.array(state)  # accept lists/tuples as well as arrays
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)

        if deterministic:
            mean, _ = self.actor(state)
            action = torch.tanh(mean) * self.max_action
        else:
            action, _ = self.actor.sample(state)
        return action.detach().cpu().numpy().flatten()


def _initial_state(env):
    """Reset ``env`` and return the initial observation only.

    Handles both the old Gym API (reset() -> obs) and Gym >= 0.26
    (reset() -> (obs, info)).
    """
    reset_result = env.reset()
    if isinstance(reset_result, tuple):
        return reset_result[0]
    return reset_result


# Entry point: train SAC on Pendulum-v1, then run one rendered evaluation episode.
def main():
    env = gym.make("Pendulum-v1")
    state_dim = env.observation_space.shape[0]  # observation dimensionality
    action_dim = env.action_space.shape[0]  # action dimensionality
    max_action = float(env.action_space.high[0])  # symmetric action bound

    agent = SACAgent(state_dim, action_dim, max_action)

    episodes = 200  # training episodes
    batch_size = 256  # replay minibatch size

    for episode in range(episodes):
        state = _initial_state(env)
        episode_reward = 0

        for step in range(200):  # per-episode step cap
            action = agent.select_action(state)
            # Gym >= 0.26 step API: (obs, reward, terminated, truncated, info).
            next_state, reward, done, truncated, _ = env.step(action)
            # Store terminated-or-truncated as the buffer's "done" flag.
            agent.store_transition((state, action, reward, next_state, done or truncated))

            state = next_state
            episode_reward += reward

            agent.train(batch_size)  # one gradient step per environment step

            if done or truncated:
                break

        print(f"Episode {episode + 1}, Reward: {episode_reward}")

    # Evaluation: Gym >= 0.26 only renders when render_mode is set at creation
    # time (env.render() on the training env is a no-op), so rebuild the env.
    env.close()
    try:
        env = gym.make("Pendulum-v1", render_mode="human")
    except TypeError:
        # Older gym without the render_mode kwarg: reuse the plain env.
        env = gym.make("Pendulum-v1")

    state = _initial_state(env)
    for _ in range(200):
        env.render()
        action = agent.select_action(state, deterministic=True)  # greedy action
        state, _, done, truncated, _ = env.step(action)
        if done or truncated:
            break

    env.close()


if __name__ == "__main__":
    main()
