import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import gym
import matplotlib.pyplot as plt
import imageio


# Policy network: maps an observation to a probability distribution over actions.
class PolicyNetwork(nn.Module):
    """Two-layer MLP with a softmax head producing action probabilities."""

    def __init__(self, input_dim, output_dim):
        super(PolicyNetwork, self).__init__()
        # One hidden layer of 128 units, then the action head.
        self.fc1 = nn.Linear(input_dim, 128)
        self.fc2 = nn.Linear(128, output_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Return action probabilities for observation batch ``x``."""
        hidden = torch.relu(self.fc1(x))
        logits = self.fc2(hidden)
        return self.softmax(logits)


# Standalone REINFORCE loss computation.
def calculate_policy_loss(log_probs, rewards, gamma=0.99):
    """Compute the REINFORCE policy-gradient loss for one episode.

    Args:
        log_probs: list of 0-dim tensors, log pi(a_t | s_t) per step.
        rewards: list of per-step rewards for the same episode.
        gamma: discount factor for the returns.

    Returns:
        Scalar tensor: sum over steps of ``-log_prob_t * normalized_return_t``.
    """
    # Discounted returns, accumulated backwards over the episode.
    discounted_rewards = []
    running_reward = 0.0
    for r in reversed(rewards):
        running_reward = r + gamma * running_reward
        discounted_rewards.insert(0, running_reward)
    discounted_rewards = torch.tensor(discounted_rewards)

    # Normalize returns (mean baseline) to reduce gradient variance.
    discounted_rewards = discounted_rewards - discounted_rewards.mean()
    # BUG FIX: torch's unbiased std() of a single element is NaN, which would
    # poison the whole update for one-step episodes; only divide when there
    # are at least two returns.
    if discounted_rewards.numel() > 1:
        discounted_rewards = discounted_rewards / (discounted_rewards.std() + 1e-9)

    policy_loss = [-log_prob * reward
                   for log_prob, reward in zip(log_probs, discounted_rewards)]
    return torch.stack(policy_loss).sum()


# REINFORCE agent: samples actions from the policy and updates it per episode.
class PolicyGradientAgent:
    """Wraps a PolicyNetwork with action sampling and episodic policy updates."""

    def __init__(self, input_dim, output_dim, lr=0.001, gamma=0.99):
        self.policy_network = PolicyNetwork(input_dim, output_dim)
        self.optimizer = optim.Adam(self.policy_network.parameters(), lr=lr)
        self.gamma = gamma
        # Per-episode buffers, consumed and cleared by update_policy().
        self.log_probs = []
        self.rewards = []

    def select_action(self, state):
        """Sample an action for ``state`` and remember its log-probability."""
        # Add a batch dimension, query the policy, then sample one action.
        obs = torch.FloatTensor(state).unsqueeze(0)
        probs = self.policy_network(obs)
        chosen = torch.multinomial(probs, 1).item()
        # Keep the log-probability of the sampled action for the loss later.
        self.log_probs.append(torch.log(probs.squeeze(0)[chosen]))
        return chosen

    def store_reward(self, reward):
        """Record one step's reward for the current episode."""
        self.rewards.append(reward)

    def update_policy(self):
        """Take one gradient step using the whole episode's samples."""
        loss = calculate_policy_loss(self.log_probs, self.rewards, self.gamma)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Drop the episode buffers so the next episode starts clean.
        self.log_probs = []
        self.rewards = []


# Main training loop.
if __name__ == "__main__":
    env_name = 'CartPole-v1'
    env = gym.make(env_name)
    input_dim = env.observation_space.shape[0]
    output_dim = env.action_space.n
    agent = PolicyGradientAgent(input_dim, output_dim)

    num_episodes = 1000
    all_rewards = []  # total reward per episode, for the learning curve

    for episode in range(num_episodes):
        state, _ = env.reset()
        total_reward = 0
        done = False

        while not done:
            # select_action stores each step's log-probability for the loss.
            action = agent.select_action(state)
            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated
            agent.store_reward(reward)  # save each sampled reward
            total_reward += reward
            state = next_state

        # One policy-gradient update from this episode's samples.
        agent.update_policy()
        all_rewards.append(total_reward)

        if episode % 10 == 0:
            print(f"Episode {episode}: Total Reward = {total_reward}")

    env.close()

    # Plot the reward curve.
    plt.plot(all_rewards)
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    plt.title('Reward Curve over Episodes')
    plt.show()

    # Re-create the environment for rendering.
    env = gym.make(env_name, render_mode='rgb_array')
    frames = []
    state, _ = env.reset()
    max_steps = 3600  # generous cap on the number of recorded steps
    with torch.no_grad():  # evaluation only: no autograd graphs needed
        for _ in range(max_steps):
            action = agent.select_action(state)
            next_state, reward, terminated, truncated, _ = env.step(action)
            frame = env.render()
            if frame is not None:
                frames.append(frame)
            # BUG FIX: the original ignored termination and kept stepping a
            # finished episode; reset the environment instead so every
            # recorded frame comes from a valid episode.
            if terminated or truncated:
                state, _ = env.reset()
            else:
                state = next_state
    # Recording reuses select_action, which appends to the agent's episode
    # buffers; clear them so they don't leak into any later training.
    agent.log_probs = []
    agent.rewards = []

    env.close()

    # Save the recorded frames as a video.
    epoch = num_episodes
    imageio.mimsave(f"./policy_gradient_{env_name}_{epoch}_video.mp4", frames, fps=60)
    print(f"视频已保存为 ./policy_gradient_{env_name}_{epoch}_video.mp4")