import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import gym

# A simple feed-forward neural network used as the policy network
class PolicyNetwork(nn.Module):
    """Two-layer MLP mapping a state vector to unnormalized action logits.

    The output is raw logits; callers apply softmax to obtain action
    probabilities.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(PolicyNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Single hidden layer with ReLU, then a linear readout.
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)

# PPO algorithm (clipped surrogate objective)
class PPO:
    """Proximal Policy Optimization (clipped surrogate) for discrete actions."""

    def __init__(self, input_dim, hidden_dim, output_dim, lr, gamma, K_epochs, epsilon_clip):
        # Policy network maps states to action logits.
        self.policy = PolicyNetwork(input_dim, hidden_dim, output_dim)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
        self.gamma = gamma                # discount factor for returns
        self.K_epochs = K_epochs          # optimization epochs per update
        self.epsilon_clip = epsilon_clip  # PPO clipping range

    def select_action(self, state):
        """Sample an action index from the current policy for one state."""
        state = torch.tensor(state, dtype=torch.float32)
        action_probs = torch.softmax(self.policy(state), dim=-1)
        action = torch.multinomial(action_probs, 1).item()
        return action

    def update(self, states, actions, old_probs, rewards):
        """Run K_epochs of clipped-PPO gradient steps on one trajectory.

        Args:
            states: list of states (each convertible to a float tensor).
            actions: list of int action indices taken.
            old_probs: probabilities (NOT log-probs) that the behavior policy
                assigned to the taken actions at collection time.
            rewards: per-step rewards in time order (used to build
                discounted returns).
        """
        states = torch.tensor(np.array(states), dtype=torch.float32)
        actions = torch.tensor(actions, dtype=torch.int64)
        # BUG FIX: the original computed exp(log_prob - prob), mixing a
        # log-probability with a raw probability.  Convert the stored
        # probabilities to log-probs so the importance ratio is
        # exp(new_log_prob - old_log_prob).
        old_log_probs = torch.log(torch.tensor(old_probs, dtype=torch.float32))
        rewards = torch.tensor(rewards, dtype=torch.float32)

        # BUG FIX: gamma was stored but never used — raw per-step rewards
        # served as advantages.  Build discounted returns instead.
        returns = torch.zeros_like(rewards)
        running = 0.0
        for i in reversed(range(len(rewards))):
            running = rewards[i] + self.gamma * running
            returns[i] = running
        # Normalize for gradient stability (skip for a single-step batch,
        # where std is undefined).
        if len(returns) > 1:
            returns = (returns - returns.mean()) / (returns.std() + 1e-8)

        for _ in range(self.K_epochs):
            action_probs = torch.softmax(self.policy(states), dim=-1)
            dist = torch.distributions.Categorical(action_probs)
            entropy = dist.entropy().mean()

            new_log_probs = dist.log_prob(actions)
            ratio = torch.exp(new_log_probs - old_log_probs)
            clipped_ratio = torch.clamp(ratio, 1 - self.epsilon_clip, 1 + self.epsilon_clip)
            surrogate_loss = -torch.min(ratio * returns, clipped_ratio * returns).mean()

            # BUG FIX: entropy is an exploration BONUS and must be
            # subtracted; the original added 0.5*entropy, which penalized
            # exploration (and 0.5 is far larger than the customary ~0.01).
            loss = surrogate_loss - 0.01 * entropy

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

# Main training loop
# Training entry point: PPO on CartPole-v1.
# NOTE(review): this script uses the classic gym API (reset() -> obs,
# step() -> (obs, reward, done, info)); gym>=0.26 / gymnasium return extra
# values — confirm the installed version matches.
env = gym.make('CartPole-v1')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n

ppo = PPO(state_dim, 64, action_dim, lr=0.002, gamma=0.99, K_epochs=10, epsilon_clip=0.2)

max_episodes = 1000
max_timesteps = 150

for episode in range(max_episodes):
    state = env.reset()
    states, actions, old_probs, rewards = [], [], [], []
    total_reward = 0

    for t in range(max_timesteps):
        action = ppo.select_action(state)
        states.append(state)
        actions.append(action)
        # Record the probability the behavior policy assigned to the chosen
        # action before any update; no_grad avoids building a graph for a
        # value we only store as a float.
        with torch.no_grad():
            probs = torch.softmax(ppo.policy(torch.tensor(state, dtype=torch.float32)), dim=-1)
        old_probs.append(probs[action].item())
        state, reward, done, _ = env.step(action)
        rewards.append(reward)
        total_reward += reward

        if done:
            break

    # BUG FIX: update once per episode on the full trajectory.  The original
    # `if t % update_timestep == 0` (update_timestep=2000 > max_timesteps)
    # only fired at t == 0, so every update saw exactly one transition and
    # the remainder of each episode's data was discarded.
    ppo.update(states, actions, old_probs, rewards)

    if episode % 10 == 0:
        print(f"Episode: {episode}, Total Reward: {total_reward}")

env.close()

