import numpy as np
import gymnasium as gym
from scipy.linalg import solve_continuous_are
from sklearn.linear_model import LinearRegression

class SystemIdentifier:
    def __init__(self, env, force_mag=10.0):
        self.env = env
        self.force_mag = force_mag  # CartPole的默认力值
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = 1  # 视为单输入系统
        
    def collect_data(self, n_samples=10000, noise_scale=0.1):
        """采集系统响应数据"""
        X = []
        y = []
        
        for _ in range(n_samples):
            # 生成随机扰动状态
            state = self._generate_perturbed_state(noise_scale)
            
            # 随机选择动作（0或1）
            action = np.random.randint(2)
            force = self.force_mag if action == 1 else -self.force_mag
            
            # 获取状态导数
            x_dot = self._get_derivative(state, force)
            
            # 记录数据
            X.append(np.hstack([state, force]))
            y.append(x_dot)
            
        return np.array(X), np.array(y)
    
    def estimate_AB(self, X, y):
        """使用线性回归估计A、B矩阵"""
        reg = LinearRegression(fit_intercept=False).fit(X, y)
        
        # 分解系数矩阵
        AB = reg.coef_
        A = AB[:, :self.state_dim]
        B = AB[:, self.state_dim:]
        
        return A, B
    
    def _generate_perturbed_state(self, noise_scale):
        """生成平衡点附近的扰动状态"""
        return np.random.normal(scale=noise_scale, size=self.state_dim)
    
    def _get_derivative(self, state, force):
        """通过环境交互获取状态导数"""
        self.env.reset()
        # 使用环境物理引擎（无需知道具体参数）
        dt = self.env.unwrapped.tau  # 获取时间步长
        original_state = self.env.unwrapped.state
        
        try:
            # 设置临时状态
            self.env.unwrapped.state = state
            # 计算导数近似值
            new_state, reward, done, _, _ = self.env.step(int(force > 0))
            x_dot = (new_state - state) / dt
        finally:
            self.env.unwrapped.state = original_state
            
        return x_dot

def lqr_control(env, A, B, Q, R, max_steps=1000000):
    """Run an LQR controller on CartPole using the identified (A, B) model.

    Solves the continuous-time algebraic Riccati equation for P and forms
    the gain K = R^{-1} B^T P, then maps the continuous control u = -K x
    onto CartPole's discrete actions (1 = push right if u > 0, else 0).

    Note: a fresh human-rendered environment is created internally; the
    `env` argument is retained for backward compatibility but is not the
    environment being stepped.

    Returns the total reward accumulated until the episode terminates,
    is truncated, or max_steps elapse.
    """
    # Solve the Riccati equation and form the feedback gain.
    P = solve_continuous_are(A, B, Q, R)
    K = np.linalg.inv(R) @ B.T @ P

    render_env = gym.make("CartPole-v1", render_mode="human")
    state, info = render_env.reset()
    total_reward = 0
    try:
        for _ in range(max_steps):
            # K is (1, n), so K @ state is a length-1 array; extract the
            # scalar so the sign test below is unambiguous (1-element-array
            # truthiness is deprecated in NumPy).
            u = float(-(K @ state))
            action = 1 if u > 0 else 0

            # Gymnasium step returns (obs, reward, terminated, truncated, info).
            state, reward, terminated, truncated, _ = render_env.step(action)
            total_reward += reward
            if int(total_reward) % 100 == 0:
                print("Total Reward:", total_reward)
                print(state)
            # Fix: also stop on `truncated` — the original checked only
            # `terminated`, so it kept stepping a finished episode past
            # CartPole-v1's 500-step time limit.
            if terminated or truncated:
                break
    finally:
        # Close the render window even if stepping raises.
        render_env.close()
    return total_reward

def main():
    """End-to-end pipeline: identify CartPole dynamics, then run LQR."""
    env = gym.make("CartPole-v1")

    try:
        # Set up the system identifier around the environment.
        identifier = SystemIdentifier(env)

        # Step 1: collect excitation/response data.
        X, y = identifier.collect_data(n_samples=20000)
        print(f"Collected {X.shape[0]} data samples")

        # Step 2: fit the linear model x_dot = A x + B u.
        A, B = identifier.estimate_AB(X, y)
        print("\nEstimated A matrix:\n", np.round(A, 3))
        print("\nEstimated B matrix:\n", np.round(B, 3))

        # Step 3: LQR weights — penalize cart position and pole angle most.
        Q = np.diag([1.0, 0.1, 10.0, 0.1])
        R = np.array([[0.1]])

        # Step 4: close the loop and run the controller.
        total_reward = lqr_control(env, A, B, Q, R)
        print(f"\nTotal Reward: {total_reward}")

    finally:
        env.close()


if __name__ == "__main__":
    main()