import os
import random
from collections import deque
from datetime import datetime

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

# Compute device: use CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Training hyper-parameters.
max_episodes = 1000
max_steps = 1000
batch_size = 64
buffer_size = 100000
gamma = 0.99  # discount factor
tau = 0.001  # soft-update coefficient for the target networks
actor_lr = 1e-4
critic_lr = 1e-3
weight_decay = 1e-2  # L2 weight decay, applied to the Critic optimizer only
exploration_noise = 0.1  # scale of the exploration noise added to actions

# Create the environment (episodes are truncated after max_steps).
env = gym.make("Pendulum-v1", max_episode_steps=max_steps)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_bound = env.action_space.high[0]  # Pendulum's action range is [-2, 2]

# Seed every RNG involved (torch, numpy, random, CUDA, env) for reproducibility.
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
env.reset(seed=seed)


# Actor network (deterministic policy): maps a state to a continuous action.
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_dims=(256, 256), action_bound=1.0):
        """Build the deterministic policy network.

        Args:
            state_dim: Dimension of the observation vector.
            action_dim: Dimension of the action vector.
            hidden_dims: Sizes of the hidden layers. A tuple default replaces
                the original mutable list default (shared-state pitfall).
            action_bound: Scalar bound; outputs are scaled to [-bound, bound].
        """
        super().__init__()

        # Feature extractor: Linear -> ReLU -> LayerNorm per hidden layer.
        layers = []
        input_dim = state_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(input_dim, hidden_dim))
            layers.append(nn.ReLU())
            layers.append(nn.LayerNorm(hidden_dim))
            input_dim = hidden_dim
        self.feature_extractor = nn.Sequential(*layers)

        # Output head; tanh in forward() keeps the raw output in [-1, 1]
        # before scaling by action_bound.
        self.output_layer = nn.Linear(hidden_dims[-1], action_dim)
        self.action_bound = action_bound

        self._init_weights()

    def _init_weights(self):
        """Orthogonal init for hidden layers; small gain on the output layer."""
        for m in self.feature_extractor.modules():
            if isinstance(m, nn.Linear):
                nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
                nn.init.constant_(m.bias, 0)

        # Small output-layer weights keep initial actions near zero.
        nn.init.orthogonal_(self.output_layer.weight, gain=0.01)
        nn.init.constant_(self.output_layer.bias, 0)

    def forward(self, state):
        """Return a deterministic action in [-action_bound, action_bound]."""
        x = self.feature_extractor(state)
        # tanh bounds the output to [-1, 1]; scale to the action range.
        return torch.tanh(self.output_layer(x)) * self.action_bound


# Critic network (Q-value function): Q(s, a) -> scalar.
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_dims=(256, 256)):
        """Build the Q-network.

        Args:
            state_dim: Dimension of the observation vector.
            action_dim: Dimension of the action vector.
            hidden_dims: Sizes of the two hidden layers. A tuple default
                replaces the original mutable list default (shared-state pitfall).
        """
        super().__init__()

        # First layer processes the state only.
        self.fc1 = nn.Linear(state_dim, hidden_dims[0])
        self.ln1 = nn.LayerNorm(hidden_dims[0])

        # Second layer processes the state features concatenated with the action.
        self.fc2 = nn.Linear(hidden_dims[0] + action_dim, hidden_dims[1])
        self.ln2 = nn.LayerNorm(hidden_dims[1])

        # Output layer: a single Q-value per (state, action) pair.
        self.fc3 = nn.Linear(hidden_dims[1], 1)

        self._init_weights()

    def _init_weights(self):
        """Orthogonal init for hidden layers; small gain on the output layer."""
        nn.init.orthogonal_(self.fc1.weight, gain=np.sqrt(2))
        nn.init.constant_(self.fc1.bias, 0)

        nn.init.orthogonal_(self.fc2.weight, gain=np.sqrt(2))
        nn.init.constant_(self.fc2.bias, 0)

        nn.init.orthogonal_(self.fc3.weight, gain=0.01)
        nn.init.constant_(self.fc3.bias, 0)

    def forward(self, state, action):
        """Return Q(state, action) with shape (batch, 1)."""
        # Encode the state alone first.
        x = F.relu(self.ln1(self.fc1(state)))

        # Inject the action at the second layer (standard DDPG critic layout).
        x = torch.cat([x, action], dim=1)
        x = F.relu(self.ln2(self.fc2(x)))

        # Scalar Q-value.
        return self.fc3(x)


# Ornstein-Uhlenbeck process: temporally correlated exploration noise.
class OUNoise:
    def __init__(self, action_dim, mu=0, theta=0.15, sigma=0.2):
        """Configure the OU process and start it at the mean `mu`.

        Args:
            action_dim: Number of independent noise channels.
            mu: Long-run mean the process reverts toward.
            theta: Mean-reversion rate.
            sigma: Scale of the Gaussian diffusion term.
        """
        self.action_dim = action_dim
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        """Reset the internal state back to the mean."""
        self.state = np.full(self.action_dim, self.mu, dtype=float)

    def sample(self):
        """Advance the process one step and return the new noise vector."""
        drift = self.theta * (self.mu - self.state)
        diffusion = self.sigma * np.random.randn(self.action_dim)
        self.state = self.state + drift + diffusion
        return self.state


# Fixed-capacity experience replay buffer with FIFO eviction.
class ReplayBuffer:
    def __init__(self, buffer_size):
        """Create a buffer holding at most `buffer_size` transitions."""
        self.buffer = deque(maxlen=buffer_size)

    def add(self, state, action, reward, next_state, done):
        """Append one transition; the oldest entry is dropped when full."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random minibatch and return it as stacked arrays.

        Returns a 5-tuple (states, actions, rewards, next_states, dones);
        rewards and dones are float32 arrays.
        """
        transitions = random.sample(self.buffer, batch_size)
        columns = list(zip(*transitions))
        states = np.array(columns[0])
        actions = np.array(columns[1])
        rewards = np.array(columns[2], dtype=np.float32)
        next_states = np.array(columns[3])
        dones = np.array(columns[4], dtype=np.float32)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)


# DDPG agent: deterministic actor-critic with target networks, experience
# replay, and Ornstein-Uhlenbeck exploration noise.
class DDPG:
    def __init__(
        self,
        state_dim,
        action_dim,
        action_bound,
        actor_lr=1e-4,
        critic_lr=1e-3,
        gamma=0.99,
        tau=0.001,
        buffer_size=100000,
        batch_size=64,
        device=None,
        weight_decay=1e-2,
        exploration_noise=0.1,
    ):
        """Create the actor/critic networks, their targets, and the optimizers.

        Args:
            state_dim: Dimension of the observation vector.
            action_dim: Dimension of the action vector.
            action_bound: Actions are clipped to [-action_bound, action_bound].
            actor_lr: Learning rate for the actor optimizer.
            critic_lr: Learning rate for the critic optimizer.
            gamma: Discount factor for the TD target.
            tau: Soft-update mixing coefficient for the target networks.
            buffer_size: Capacity of the replay buffer.
            batch_size: Minibatch size drawn from the buffer per update.
            device: Torch device; None selects CUDA when available (the
                original read a module-level global evaluated at class
                definition time).
            weight_decay: L2 penalty applied to the critic optimizer only
                (previously read from a module-level global).
            exploration_noise: Scale of the OU noise in select_action
                (previously read from a module-level global).
        """
        # Resolve the device locally so the class does not depend on a
        # module-level `device` global.
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        self.gamma = gamma
        self.tau = tau
        self.batch_size = batch_size
        self.action_bound = action_bound
        self.exploration_noise = exploration_noise

        # Main and target networks; targets are updated slowly (soft updates)
        # to stabilize the bootstrapped TD target.
        self.actor = Actor(state_dim, action_dim, action_bound=action_bound).to(device)
        self.actor_target = Actor(state_dim, action_dim, action_bound=action_bound).to(
            device
        )

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)

        # Start the targets as exact copies of the main networks.
        self._hard_update(self.actor_target, self.actor)
        self._hard_update(self.critic_target, self.critic)

        # Optimizers; weight decay regularizes the critic only.
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = optim.Adam(
            self.critic.parameters(), lr=critic_lr, weight_decay=weight_decay
        )

        # Replay buffer and exploration-noise process.
        self.replay_buffer = ReplayBuffer(buffer_size)
        self.noise = OUNoise(action_dim)

        # TensorBoard logging under a timestamped run directory.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.writer = SummaryWriter(f"runs/DDPG_Pendulum_{timestamp}")

    def _hard_update(self, target, source):
        """Hard update: copy source parameters into target exactly."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)

    def _soft_update(self, target, source):
        """Soft update: target = (1 - tau) * target + tau * source."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.tau) + param.data * self.tau
            )

    def select_action(self, state, add_noise=True):
        """Return the policy action for `state`, optionally with OU noise.

        The returned action is clipped to [-action_bound, action_bound].
        """
        state = torch.FloatTensor(state).to(self.device)

        # Switch to eval mode for the forward pass, then restore train mode.
        self.actor.eval()
        with torch.no_grad():
            action = self.actor(state).cpu().numpy()
        self.actor.train()

        if add_noise:
            # Temporally correlated exploration noise, scaled by the
            # configured exploration_noise.
            action += self.noise.sample() * self.exploration_noise

        return np.clip(action, -self.action_bound, self.action_bound)

    def update(self):
        """Run one DDPG gradient step from a sampled minibatch.

        Returns:
            (actor_loss, critic_loss) as Python floats, or (None, None) when
            the buffer does not yet hold a full batch.
        """
        if len(self.replay_buffer) < self.batch_size:
            return None, None

        states, actions, rewards, next_states, dones = self.replay_buffer.sample(
            self.batch_size
        )

        # Convert the batch to tensors on the agent's device.
        states = torch.FloatTensor(states).to(self.device)
        actions = torch.FloatTensor(actions).to(self.device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
        next_states = torch.FloatTensor(next_states).to(self.device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(self.device)

        # TD target from the target networks; no gradients flow through it.
        with torch.no_grad():
            next_actions = self.actor_target(next_states)
            next_q_values = self.critic_target(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * next_q_values

        # Critic step: regress Q(s, a) toward the TD target.
        current_q = self.critic(states, actions)
        critic_loss = F.mse_loss(current_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), max_norm=1.0)
        self.critic_optimizer.step()

        # Actor step: ascend the critic's Q estimate (minimize its negative).
        actor_loss = -self.critic(states, self.actor(states)).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.actor.parameters(), max_norm=1.0)
        self.actor_optimizer.step()

        # Slowly track the main networks with the targets.
        self._soft_update(self.critic_target, self.critic)
        self._soft_update(self.actor_target, self.actor)

        return actor_loss.item(), critic_loss.item()


# Instantiate the DDPG agent with the hyper-parameters defined above.
ddpg = DDPG(
    state_dim,
    action_dim,
    action_bound=action_bound,
    actor_lr=actor_lr,
    critic_lr=critic_lr,
    gamma=gamma,
    tau=tau,
    buffer_size=buffer_size,
    batch_size=batch_size,
)

# Main training loop.
total_steps = 0
for episode in range(max_episodes):
    state, _ = env.reset()
    ddpg.noise.reset()  # start each episode with fresh exploration noise
    episode_reward = 0

    for _ in range(max_steps):
        total_steps += 1

        # Act in the environment with exploration noise.
        action = ddpg.select_action(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated

        # Store the transition for replay.
        ddpg.replay_buffer.add(state, action, reward, next_state, done)

        # One gradient step once the buffer holds a full batch.
        if len(ddpg.replay_buffer) >= batch_size:
            actor_loss, critic_loss = ddpg.update()
            # Log losses every 100 environment steps.
            if total_steps % 100 == 0 and actor_loss is not None:
                ddpg.writer.add_scalar("Loss/Actor", actor_loss, total_steps)
                ddpg.writer.add_scalar("Loss/Critic", critic_loss, total_steps)

        episode_reward += reward
        state = next_state

        if done:
            break

    # Per-episode logging and periodic progress output.
    ddpg.writer.add_scalar("Reward/Episode", episode_reward, episode)
    if episode % 10 == 0:
        print(
            f"Episode {episode}, Total Steps: {total_steps}, Reward: {episode_reward:.2f}"
        )

    # Checkpoint the networks every 100 episodes and on the last episode.
    if episode % 100 == 0 or episode == max_episodes - 1:
        checkpoint_dir = "saved_models"
        os.makedirs(checkpoint_dir, exist_ok=True)
        torch.save(
            ddpg.actor.state_dict(),
            os.path.join(checkpoint_dir, f"ddpg_actor_pendulum_{episode}.pth"),
        )
        torch.save(
            ddpg.critic.state_dict(),
            os.path.join(checkpoint_dir, f"ddpg_critic_pendulum_{episode}.pth"),
        )

# Persist the final policy and value networks.
final_dir = "saved_models"
os.makedirs(final_dir, exist_ok=True)
torch.save(ddpg.actor.state_dict(), os.path.join(final_dir, "ddpg_actor_pendulum.pth"))
torch.save(
    ddpg.critic.state_dict(), os.path.join(final_dir, "ddpg_critic_pendulum.pth")
)
print(f"Models saved to {final_dir}")

# Release logging and environment resources.
ddpg.writer.close()
env.close()
