import gymnasium as gym
from stable_baselines3 import PPO
import numpy as np
import random

# ================================
# 1. PID 控制器实现
# ================================
class PIDController:
    """Bang-bang PID controller for CartPole's discrete action space.

    Computes a continuous PID "force" from the pole angle and maps its
    sign to one of the two discrete actions (1 = push right, 0 = push left).
    """

    def __init__(self, Kp: float, Ki: float, Kd: float):
        """
        Args:
            Kp: proportional gain.
            Ki: integral gain (errors are summed per step; no dt scaling).
            Kd: derivative gain (per-step error difference; no dt scaling).
        """
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        self.integral = 0.0
        self.prev_error = 0.0

    def reset(self) -> None:
        """Clear accumulated state so the controller can start a fresh episode.

        Without this, the integral term and previous error bleed from one
        episode into the next, biasing every episode after the first.
        """
        self.integral = 0.0
        self.prev_error = 0.0

    def act(self, obs) -> int:
        """Return a discrete action (0 or 1) for one CartPole observation.

        obs = [cart_position, cart_velocity, pole_angle, pole_angular_velocity];
        only the pole angle is used as the error signal (target is 0, upright).
        """
        error = obs[2]
        self.integral += error
        derivative = error - self.prev_error
        force = self.Kp * error + self.Ki * self.integral + self.Kd * derivative
        self.prev_error = error
        # Positive force -> push right (action 1); otherwise push left (0).
        return 1 if force > 0 else 0

# ================================
# 2. 包装环境，加干扰
# ================================
class NoisyCartPoleEnv:
    """CartPole-v1 wrapper that injects random velocity disturbances.

    With probability ``noise_prob`` on each step, uniform noise in
    [-noise_level, +noise_level] is added to the cart velocity and the
    pole angular velocity, simulating external pushes on the cart.
    """

    def __init__(self, noise_level=0.1, render_mode="human", noise_prob=0.7):
        """
        Args:
            noise_level: half-width of the uniform disturbance interval.
            render_mode: forwarded to ``gym.make`` (pass None for headless runs).
            noise_prob: per-step probability of applying the disturbance
                (default 0.7, matching the original hard-coded value).
        """
        self.env = gym.make("CartPole-v1", render_mode=render_mode)
        self.noise_level = noise_level
        self.noise_prob = noise_prob

    def reset(self, **kwargs):
        """Reset the wrapped environment; kwargs (e.g. ``seed=``) are forwarded."""
        return self.env.reset(**kwargs)

    def step(self, action):
        """Step the wrapped environment, then randomly perturb velocities."""
        obs, reward, terminated, truncated, info = self.env.step(action)

        # Simulate an external push on the cart.
        if random.random() < self.noise_prob:
            obs[1] += np.random.uniform(-self.noise_level, self.noise_level)  # cart velocity
            obs[3] += np.random.uniform(-self.noise_level, self.noise_level)  # pole angular velocity

        return obs, reward, terminated, truncated, info

    def close(self):
        """Release the underlying environment's resources."""
        self.env.close()

# ================================
# 3. RL 模型（PPO）
# ================================
def run_rl_model(episodes=5, noise_level=0.1):
    """Evaluate a pretrained PPO policy on the noisy CartPole environment.

    Loads the "ppo_cartpole" model from disk, plays ``episodes`` episodes
    with deterministic actions, prints each episode's return, and returns
    the mean return across all episodes.
    """
    policy = PPO.load("ppo_cartpole")
    env = NoisyCartPoleEnv(noise_level=noise_level)
    episode_returns = []
    for episode_idx in range(episodes):
        obs, _ = env.reset()
        ep_return = 0
        terminated = truncated = False
        # Roll out one full episode under the deterministic policy.
        while not (terminated or truncated):
            action, _ = policy.predict(obs, deterministic=True)
            obs, reward, terminated, truncated, _ = env.step(action)
            ep_return += reward
        print(f"[RL] Episode {episode_idx+1}: reward = {ep_return}")
        episode_returns.append(ep_return)
    env.close()
    return np.mean(episode_returns)

# ================================
# 4. PID 控制器
# ================================
def run_pid_controller(episodes=5, noise_level=0.1):
    """Evaluate a hand-tuned PID controller on the noisy CartPole environment.

    Plays ``episodes`` episodes, prints each episode's return, and returns
    the mean return across all episodes.

    Args:
        episodes: number of evaluation episodes.
        noise_level: disturbance magnitude passed to NoisyCartPoleEnv.
    """
    env = NoisyCartPoleEnv(noise_level=noise_level)
    rewards = []
    for ep in range(episodes):
        # Fresh controller each episode: reusing one instance lets the
        # integral term and prev_error leak across episode boundaries,
        # biasing every episode after the first.
        pid = PIDController(Kp=0.1, Ki=0.01, Kd=0.3)
        obs, _ = env.reset()
        total_reward = 0
        done, truncated = False, False
        while not (done or truncated):
            action = pid.act(obs)
            obs, reward, done, truncated, _ = env.step(action)
            total_reward += reward
        print(f"[PID] Episode {ep+1}: reward = {total_reward}")
        rewards.append(total_reward)
    env.close()
    return np.mean(rewards)

# ================================
# 5. 主程序对比
# ================================
if __name__ == "__main__":
    NOISE = 0.95  # disturbance strength; increase to see the effect more clearly

    # Run both controllers under identical noise and collect their mean scores.
    evaluations = (
        ("\n===== PID 控制器测试（带噪声） =====", run_pid_controller),
        ("\n===== RL (PPO) 测试（带噪声） =====", run_rl_model),
    )
    scores = []
    for banner, runner in evaluations:
        print(banner)
        scores.append(runner(noise_level=NOISE))
    pid_avg, rl_avg = scores

    print("\n===== 对比结果 =====")
    print(f"RL (PPO) 平均得分: {rl_avg:.2f}")
    print(f"PID 控制器平均得分: {pid_avg:.2f}")