import gymnasium as gym
import numpy as np


# 定义策略网络（Actor）
class PolicyNet:
    """Two-layer softmax policy (actor) trained with the clipped PPO objective.

    Pure-NumPy implementation: a tanh hidden layer feeds a softmax over
    discrete actions. Updates are plain gradient ascent on the clipped
    surrogate objective, averaged over the batch.
    """

    def __init__(self, n_states, n_hiddens, n_actions, learning_rate):
        self.n_states = n_states
        self.n_hiddens = n_hiddens
        self.n_actions = n_actions
        self.learning_rate = learning_rate

        # Small-scale initialization: unscaled randn weights push the tanh
        # layer into saturation and stall learning from the start.
        self.W1 = np.random.randn(self.n_states, self.n_hiddens) * 0.1
        self.b1 = np.zeros((1, self.n_hiddens))
        self.W2 = np.random.randn(self.n_hiddens, self.n_actions) * 0.1
        self.b2 = np.zeros((1, self.n_actions))

    def forward(self, state):
        """Return action probabilities, shape (batch, n_actions)."""
        hidden = np.tanh(np.dot(state, self.W1) + self.b1)
        logits = np.dot(hidden, self.W2) + self.b2
        # Numerically stable softmax: subtract the row max so np.exp cannot
        # overflow for large logits.
        logits = logits - np.max(logits, axis=1, keepdims=True)
        action_probs = np.exp(logits)
        action_probs /= np.sum(action_probs, axis=1, keepdims=True)
        return action_probs

    def update(self, states, actions, advantages, old_log_probs, eps):
        """Run several gradient-ascent steps on the clipped PPO objective.

        :param states: batch of states, shape (batch, n_states)
        :param actions: int action index per transition
        :param advantages: advantage estimate per transition
        :param old_log_probs: log pi_old(a|s) recorded before updating
        :param eps: PPO clipping parameter
        """
        for _ in range(10):  # several passes over the same batch
            grad_W1, grad_b1, grad_W2, grad_b2 = self._compute_gradients(
                states, actions, advantages, old_log_probs, eps)
            # Gradients point in the ascent direction of the objective.
            self.W1 += self.learning_rate * grad_W1
            self.b1 += self.learning_rate * grad_b1
            self.W2 += self.learning_rate * grad_W2
            self.b2 += self.learning_rate * grad_b2

    def _compute_gradients(self, states, actions, advantages, old_log_probs, eps):
        """Batch-averaged ascent gradient of min(surr1, surr2).

        Fixes two defects of the earlier version:
        * d log-softmax / d logits is onehot(action) - pi, not a bare one-hot;
        * the per-sample gradient is scaled by d(objective)/d(log_prob)
          (ratio * advantage, or 0 when the clipped term is active), not by
          the loss value itself.

        :param states: batch of states, shape (batch, n_states)
        :param actions: int action index per transition
        :param advantages: advantage estimate per transition
        :param old_log_probs: log pi_old(a|s) recorded before updating
        :param eps: PPO clipping parameter
        :return: gradients for W1, b1, W2, b2, averaged over the batch
        """
        batch_size = len(states)
        grad_W1 = np.zeros(self.W1.shape)
        grad_b1 = np.zeros(self.b1.shape)
        grad_W2 = np.zeros(self.W2.shape)
        grad_b2 = np.zeros(self.b2.shape)

        for i in range(batch_size):
            state = states[i][np.newaxis, :]
            action = actions[i]
            advantage = advantages[i]
            old_log_prob = old_log_probs[i]

            action_probs = self.forward(state)
            log_prob = np.log(action_probs[0, action])
            ratio = np.exp(log_prob - old_log_prob)

            surr1 = ratio * advantage
            surr2 = np.clip(ratio, 1 - eps, 1 + eps) * advantage
            # When the clipped term is the active minimum its derivative
            # w.r.t. the parameters is zero; otherwise the derivative of
            # ratio * advantage w.r.t. log_prob is ratio * advantage.
            coef = ratio * advantage if surr1 <= surr2 else 0.0

            # Gradient of log pi(action|state) w.r.t. the logits.
            grad_logits = -action_probs[0]
            grad_logits[action] += 1.0
            grad_logits = coef * grad_logits[np.newaxis, :]

            hidden = np.tanh(np.dot(state, self.W1) + self.b1)
            grad_hidden = (1 - hidden ** 2) * np.dot(grad_logits, self.W2.T)

            grad_W1 += np.dot(state.T, grad_hidden)
            grad_b1 += grad_hidden
            grad_W2 += np.dot(hidden.T, grad_logits)
            grad_b2 += grad_logits

        return (grad_W1 / batch_size, grad_b1 / batch_size,
                grad_W2 / batch_size, grad_b2 / batch_size)


# 定义价值网络（Critic）
class ValueNet:
    """Two-layer state-value network (critic) trained on squared TD error."""

    def __init__(self, n_states, n_hiddens, learning_rate):
        self.n_states = n_states
        self.n_hiddens = n_hiddens
        self.learning_rate = learning_rate

        # Small-scale init to keep the tanh layer away from saturation.
        self.W1 = np.random.randn(self.n_states, self.n_hiddens) * 0.1
        self.b1 = np.zeros((1, self.n_hiddens))
        self.W2 = np.random.randn(self.n_hiddens, 1) * 0.1
        self.b2 = np.zeros((1, 1))

    def forward(self, state):
        """Return the estimated state value, shape (batch, 1)."""
        hidden = np.tanh(np.dot(state, self.W1) + self.b1)
        return np.dot(hidden, self.W2) + self.b2

    def update(self, states, targets):
        """Run several gradient-descent steps on 0.5 * (V(s) - target)^2.

        :param states: batch of states, shape (batch, n_states)
        :param targets: TD target value per state
        """
        for _ in range(10):  # several passes over the same batch
            grad_W1, grad_b1, grad_W2, grad_b2 = self._compute_gradients(states, targets)
            # Descend the loss gradient; the earlier `+=` ascended it,
            # driving the value estimates away from the targets.
            self.W1 -= self.learning_rate * grad_W1
            self.b1 -= self.learning_rate * grad_b1
            self.W2 -= self.learning_rate * grad_W2
            self.b2 -= self.learning_rate * grad_b2

    def _compute_gradients(self, states, targets):
        """Batch-averaged gradient of the squared-error loss w.r.t. the weights.

        :param states: batch of states, shape (batch, n_states)
        :param targets: TD target value per state
        :return: loss gradients for W1, b1, W2, b2, averaged over the batch
        """
        batch_size = len(states)
        grad_W1 = np.zeros(self.W1.shape)
        grad_b1 = np.zeros(self.b1.shape)
        grad_W2 = np.zeros(self.W2.shape)
        grad_b2 = np.zeros(self.b2.shape)

        for i in range(batch_size):
            state = states[i][np.newaxis, :]
            target = targets[i]

            value = self.forward(state)
            # dL/dvalue for L = 0.5 * (value - target)^2.
            grad_value = value - target

            hidden = np.tanh(np.dot(state, self.W1) + self.b1)
            # Backprop through W2 then tanh; the earlier version dropped the
            # np.dot(grad_value, W2.T) chain, so W1/b1 gradients were wrong.
            grad_hidden = (1 - hidden ** 2) * np.dot(grad_value, self.W2.T)

            grad_W1 += np.dot(state.T, grad_hidden)
            grad_b1 += grad_hidden
            grad_W2 += np.dot(hidden.T, grad_value)
            grad_b2 += grad_value

        return (grad_W1 / batch_size, grad_b1 / batch_size,
                grad_W2 / batch_size, grad_b2 / batch_size)


# 定义PPO类
class PPO:
    """Clipped-surrogate PPO agent pairing a PolicyNet actor with a ValueNet critic."""

    def __init__(self, n_states, n_hiddens, n_actions, actor_lr, critic_lr, lmbda, epochs, eps, gamma):
        self.actor = PolicyNet(n_states, n_hiddens, n_actions, actor_lr)
        self.critic = ValueNet(n_states, n_hiddens, critic_lr)
        self.gamma = gamma
        self.lmbda = lmbda
        self.epochs = epochs
        self.eps = eps

    def take_action(self, state):
        """Sample a discrete action from the current policy for one state."""
        probs = self.actor.forward(state[np.newaxis, :])[0]
        return np.random.choice(np.arange(len(probs)), p=probs)

    def learn(self, transition_dict):
        """Run one PPO learning step on a batch of stored transitions."""
        states = np.array(transition_dict['states'])
        actions = np.array(transition_dict['actions'])
        rewards = np.array(transition_dict['rewards'])
        next_states = np.array(transition_dict['next_states'])
        dones = np.array(transition_dict['dones'])

        # TD targets bootstrap from the critic; terminal transitions
        # contribute no tail value.
        values = self.critic.forward(states).flatten()
        next_values = self.critic.forward(next_states).flatten()
        td_target = rewards + self.gamma * next_values * (1 - dones)
        td_delta = td_target - values

        # Generalized advantage estimation, accumulated from the final
        # step of the episode backwards.
        advantages = np.empty(len(td_delta))
        running = 0
        for t in reversed(range(len(td_delta))):
            running = self.gamma * self.lmbda * running + td_delta[t]
            advantages[t] = running

        # Log-probabilities under the pre-update policy, held fixed while
        # the actor takes its gradient steps.
        old_log_probs = np.array([
            np.log(self.actor.forward(s)[0, a]) for s, a in zip(states, actions)
        ])

        self.actor.update(states, actions, advantages, old_log_probs, self.eps)
        self.critic.update(states, td_target)


if __name__ == "__main__":
    env = gym.make('CartPole-v0')
    n_states = env.observation_space.shape[0]
    n_actions = env.action_space.n

    # Hyperparameters.
    num_episodes = 300
    gamma = 0.9
    actor_lr = 1e-3
    critic_lr = 1e-2
    n_hiddens = 16
    lmbda = 0.95
    epochs = 10
    eps = 0.2

    agent = PPO(n_states, n_hiddens, n_actions, actor_lr, critic_lr, lmbda, epochs, eps, gamma)

    return_list = []
    for i in range(num_episodes):
        # Gymnasium's reset() returns (observation, info), not a bare observation.
        state, _ = env.reset()
        done = False
        episode_return = 0
        transition_dict = {
            'states': [],
            'actions': [],
            'next_states': [],
            'rewards': [],
            'dones': [],
        }

        while not done:
            action = agent.take_action(state)
            # Gymnasium's step() returns a 5-tuple (obs, reward, terminated,
            # truncated, info); the legacy 4-tuple unpack crashes here.
            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated
            transition_dict['states'].append(state)
            transition_dict['actions'].append(action)
            transition_dict['next_states'].append(next_state)
            transition_dict['rewards'].append(reward)
            # Cut the value bootstrap only on true termination; a time-limit
            # truncation still has value beyond the last observed state.
            transition_dict['dones'].append(terminated)
            state = next_state
            episode_return += reward

        return_list.append(episode_return)
        agent.learn(transition_dict)

        print(f'iter:{i}, return:{np.mean(return_list[-10:])}')

    env.close()