import torch
import torch.nn as nn
import torch.optim as optim
import gym
import numpy as np

# Actor-Critic network definition
class ActorCritic(nn.Module):
    """Actor-critic network with a shared hidden trunk.

    A single 128-unit hidden layer feeds two heads: a policy head producing
    unnormalized action logits and a value head producing a scalar state value.
    """

    def __init__(self, state_dim, action_dim):
        super(ActorCritic, self).__init__()
        self.fc = nn.Linear(state_dim, 128)       # shared trunk
        self.policy = nn.Linear(128, action_dim)  # actor head (logits)
        self.value = nn.Linear(128, 1)            # critic head (state value)

    def forward(self, x):
        # Shared ReLU features, then the two heads in parallel.
        hidden = self.fc(x).relu()
        return self.policy(hidden), self.value(hidden)

def train_a3c(env_name='CartPole-v1', max_episodes=200):
    """Train an advantage actor-critic agent on a discrete-action gym env.

    NOTE(review): despite the name, this is a single-process actor-critic
    (no asynchronous workers) — effectively A2C / REINFORCE with a baseline.

    Args:
        env_name: gym environment id; must have a discrete action space.
        max_episodes: number of training episodes to run.

    Returns:
        The trained ActorCritic model.
    """
    env = gym.make(env_name)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    model = ActorCritic(state_dim, action_dim)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    gamma = 0.99  # discount factor
    for episode in range(max_episodes):
        state = env.reset()
        if isinstance(state, tuple):
            state = state[0]  # newer gym versions return (obs, info)
        log_probs, values, rewards = [], [], []
        done = False
        while not done:
            state_tensor = torch.FloatTensor(np.array(state)).unsqueeze(0)
            logits, value = model(state_tensor)
            # Categorical(logits=...) applies a numerically stable log-softmax
            # internally; no explicit softmax needed.
            dist = torch.distributions.Categorical(logits=logits)
            action = dist.sample()  # shape (1,)
            # BUGFIX: take the log-prob of the sampled tensor and squeeze to a
            # scalar so the stacked tensor below has shape (T,). The original
            # stacked (1,)-shaped entries into a (T, 1) tensor, which silently
            # broadcast against the (T,) advantage into a (T, T) matrix and
            # corrupted the actor loss.
            log_probs.append(dist.log_prob(action).squeeze(0))
            step_result = env.step(action.item())
            # Support both the old (4-tuple) and new (5-tuple) gym step APIs.
            if isinstance(step_result, tuple):
                if len(step_result) == 5:
                    next_state, reward, terminated, truncated, _ = step_result
                    # NOTE(review): truncation is treated as terminal, so the
                    # return is bootstrapped from 0; bootstrapping from the
                    # critic's value estimate would be more accurate.
                    done = terminated or truncated
                else:
                    next_state, reward, done, _ = step_result
            else:
                raise RuntimeError('env.step返回值异常')
            values.append(value)
            rewards.append(reward)
            state = next_state
        # Discounted returns, accumulated backwards from the episode end.
        returns, R = [], 0.0
        for r in reversed(rewards):
            R = r + gamma * R
            returns.insert(0, R)
        returns = torch.tensor(returns, dtype=torch.float32)  # shape (T,)
        # squeeze(-1) (not bare squeeze()) keeps shape (T,) even when T == 1.
        values = torch.cat(values).squeeze(-1)                # shape (T,)
        log_probs = torch.stack(log_probs)                    # shape (T,)
        advantage = returns - values
        # Policy gradient uses the detached advantage; the critic regresses
        # its value estimates toward the empirical returns.
        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()
        loss = actor_loss + critic_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (episode+1) % 20 == 0:
            print(f"Episode {episode+1}, Reward: {sum(rewards)}")
    env.close()
    return model

def test_a3c(model, env_name='CartPole-v1', test_episodes=5):
    """Evaluate a trained model greedily (argmax action) for a few episodes.

    Prints the total reward of each evaluation episode.
    """
    env = gym.make(env_name)
    for ep in range(test_episodes):
        state = env.reset()
        if isinstance(state, tuple):
            state = state[0]  # newer gym versions return (obs, info)
        total_reward = 0
        done = False
        while not done:
            obs_tensor = torch.FloatTensor(state).unsqueeze(0)
            logits, _ = model(obs_tensor)
            # Greedy action: argmax over the softmax probabilities.
            action = torch.softmax(logits, dim=1).argmax(dim=1).item()
            result = env.step(action)
            # Accept both the old (4-tuple) and new (5-tuple) gym step APIs.
            if not isinstance(result, tuple):
                raise RuntimeError('env.step返回值异常')
            if len(result) == 5:
                state, reward, terminated, truncated, _ = result
                done = terminated or truncated
            else:
                state, reward, done, _ = result
            total_reward += reward
        print(f"Test Episode {ep+1}, Reward: {total_reward}")
    env.close()

def main():
    """Train an actor-critic model, then run the greedy evaluation on it."""
    trained_model = train_a3c()
    test_a3c(trained_model)

# Run training + evaluation only when executed as a script, not on import.
if __name__ == "__main__":
    main()

