import gymnasium as gym
import numpy as np
import cvxpy as cp

# Physical parameters of the cart-pole system
g = 9.8
m_cart = 1.0
m_pole = 0.1
total_mass = m_cart + m_pole
length = 0.5  # half of the pole length
dt = 0.02  # control / integration time step (s)

# Continuous-time linearization about the upright equilibrium:
#   x_dot = A_cont @ x + B_cont * u
# with state x = [cart position, cart velocity, pole angle, pole angular rate].
A_cont = np.zeros((4, 4))
A_cont[0, 1] = 1.0
A_cont[1, 2] = (m_pole * g) / m_cart
A_cont[2, 3] = 1.0
A_cont[3, 2] = (total_mass * g) / (m_cart * length)
B_cont = np.array([[0], [1.0/m_cart], [0], [-1.0/(m_cart * length)]])

# Forward-Euler discretization: x[k+1] = (I + A*dt) @ x[k] + (B*dt) * u[k]
A_discrete = np.eye(4) + A_cont * dt
B_discrete = B_cont * dt

# MPC tuning
N = 10  # prediction horizon (steps)
Q = np.diag([1.0, 0.1, 10.0, 0.1])  # per-step state weights
R = 0.1  # control-effort weight
Q_f = 10 * np.diag([1.0, 0.1, 10.0, 0.1])  # terminal state weights
u_max = 10.0  # force bound (maps onto the two discrete actions)
x_max = 2.4  # cart-position soft limit
theta_max = 12 * np.pi / 180  # pole-angle soft limit (+/- 12 degrees)

# Create the CartPole environment and grab the initial observation.
# NOTE(review): no render_mode is passed, so env.render() later is a no-op
# under Gymnasium; pass render_mode="human" to gym.make if a window is wanted.
env = gym.make("CartPole-v1")
observation, info = env.reset()
total_reward = 0

while True:
    # Current measured state as a flat 4-vector:
    # [cart position, cart velocity, pole angle, pole angular velocity].
    s0 = np.asarray(observation, dtype=float).flatten()

    # Decision variables: the N-step force sequence.
    U = cp.Variable(N)
    cost = 0.0
    constraints = []

    # Flattened input column so predicted states stay 1-D cvxpy expressions
    # (quad_form and plain indexing then work without 2-D gymnastics).
    b = B_discrete.flatten()

    # Roll the linearized dynamics forward over the horizon, accumulating
    # stage costs, input bounds, and soft state-constraint penalties.
    states = [s0]
    for k in range(N):
        s_prev = states[-1]
        s_next = A_discrete @ s_prev + b * U[k]
        states.append(s_next)

        # Quadratic stage cost on the state plus control effort.
        cost += cp.quad_form(s_prev, Q) + R * cp.square(U[k])

        # Hard bounds on the commanded force.
        constraints += [U[k] <= u_max, U[k] >= -u_max]

        # Soft constraints on cart position and pole angle:
        # cp.pos(.) is the constraint-violation magnitude, penalized quadratically.
        cost += 100 * cp.pos(s_next[0] - x_max)**2 + 100 * cp.pos(-x_max - s_next[0])**2
        cost += 1000 * cp.pos(s_next[2] - theta_max)**2 + 1000 * cp.pos(-theta_max - s_next[2])**2

    # Terminal cost on the final predicted state.
    cost += cp.quad_form(states[-1], Q_f)

    # Solve the finite-horizon QP.
    prob = cp.Problem(cp.Minimize(cost), constraints)
    prob.solve(solver=cp.OSQP, verbose=False)

    # Accept inaccurate-but-usable OSQP solutions too, and guard against
    # U.value being None on solver failure (the original indexed it blindly
    # and rejected OPTIMAL_INACCURATE). Fall back to zero force otherwise.
    if U.value is not None and prob.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE):
        u_opt = float(U.value[0])
    else:
        u_opt = 0.0

    # Map the continuous optimal force onto CartPole's two discrete actions
    # (1 = push right, 0 = push left).
    action = 1 if u_opt > 0 else 0

    # Gymnasium's step() returns separate terminated/truncated flags; the
    # original discarded `truncated`, so the 500-step time limit never
    # ended the loop and the env kept being stepped past truncation.
    observation, reward, terminated, truncated, _ = env.step(action)
    total_reward += reward
    env.render()
    # Periodic progress print (reward is +1 per step, so every 100 steps).
    if int(total_reward) % 100 == 0:
        print(observation)
    if terminated or truncated:
        break

env.close()
print(f"Total Reward: {total_reward}")