import gymnasium as gym
import numpy as np
from scipy.optimize import minimize

# Result of a system-identification step (assumed to have been completed
# elsewhere). Order: M (cart mass), m (pole mass), l (pole half-length),
# b_cart, b_pole — presumably cart/pole friction coefficients; they are
# unpacked but unused by the frictionless dynamics below (confirm intent).
estimated_params = [1.0, 0.1, 0.5, 0.1, 0.01]  # M, m, l, b_cart, b_pole



def cartpole_dynamics(params, s, a):
    """Compute cart and pole accelerations for the cart-pole system.

    Implements the standard frictionless cart-pole equations of motion
    (the same model gymnasium's CartPole-v1 integrates).

    Parameters
    ----------
    params : sequence of 5 floats
        (M, m, l, b_cart, b_pole): cart mass, pole mass, pole half-length,
        plus two friction coefficients that are accepted for interface
        compatibility but ignored by this frictionless model.
    s : sequence of 4 floats
        State (x, x_dot, theta, theta_dot).
    a : int or float
        Discrete action (int 1 -> +10 N, int 0 -> -10 N) or a continuous
        force in newtons (any float).

    Returns
    -------
    (x_ddot, theta_ddot) : tuple of floats
        Cart linear acceleration and pole angular acceleration.
    """
    M, m, l, b_cart, b_pole = params
    g = 9.8
    x, x_dot, theta, theta_dot = s

    # BUG FIX: the original mapped every a != 1 to -10 N, which silently
    # collapsed the continuous forces the SLSQP-based MPC feeds in.
    # Keep the discrete int 0/1 convention, pass floats through as forces.
    if isinstance(a, (int, np.integer)):
        f = 10.0 if a == 1 else -10.0
    else:
        f = float(a)

    sin_theta = np.sin(theta)
    cos_theta = np.cos(theta)
    numerator = g * sin_theta + cos_theta * (-f - m*l*theta_dot**2*sin_theta)/(M + m)
    denominator = l * (4/3 - (m * cos_theta**2)/(M + m))
    theta_ddot = numerator / denominator
    x_ddot = (f + m*l*(theta_dot**2*sin_theta - theta_ddot*cos_theta)) / (M + m)
    return x_ddot, theta_ddot


def cartpole_step(state, action, params):
    """Advance the cart-pole state by one explicit-Euler step.

    state  : (x, x_dot, theta, theta_dot)
    action : 1 pushes right (+force_mag); anything else pushes left
    params : dict with keys gravity, masspole, total_mass, length,
             polemass_length, force_mag, tau
    Returns the next state as a numpy array.
    """
    pos, vel, ang, ang_vel = state
    push = params['force_mag'] if action == 1 else -params['force_mag']
    cos_a = np.cos(ang)
    sin_a = np.sin(ang)

    # Same frictionless dynamics gymnasium's CartPole-v1 uses.
    common = (push + params['polemass_length'] * ang_vel ** 2 * sin_a) / params['total_mass']
    ang_acc = (params['gravity'] * sin_a - cos_a * common) / (
        params['length'] * (4.0 / 3.0 - params['masspole'] * cos_a ** 2 / params['total_mass'])
    )
    lin_acc = common - params['polemass_length'] * ang_acc * cos_a / params['total_mass']

    dt = params['tau']
    return np.array([
        pos + dt * vel,
        vel + dt * lin_acc,
        ang + dt * ang_vel,
        ang_vel + dt * ang_acc,
    ])

# Physical parameters of the nominal plant model
# (these match gymnasium CartPole-v1's defaults).
new_params = {
    'gravity': 9.8,
    'masscart': 1.0,
    'masspole': 0.1,
    'total_mass': 1.1,  # masscart + masspole
    'length': 0.5,  # actually half the pole length (gym convention)
    'polemass_length': 0.05,  # masspole * length
    'force_mag': 10.0,
    'tau': 0.02,  # integration time step (seconds)
}

def get_value(state):
    """Quadratic cost of a state (x, x_dot, theta, theta_dot):
    the pole angle dominates; position and both velocities get
    small 0.001 weights."""
    x, x_dot, theta, theta_dot = state
    cost = theta ** 2
    # Same summation order as the original to keep float results identical.
    for term in (x, x_dot, theta_dot):
        cost += 0.001 * term ** 2
    return cost


def sample_controller(state, params):
    """Pick the next discrete action (0 or 1) by a pruned forward search.

    Simulates 0/1 action sequences up to `search_depth` steps ahead with the
    nominal model, keeping only the `beam_width` lowest-cost states at each
    depth (a beam search).  Each candidate's action sequence is packed into
    the integer "step" as a bitmask (bit k = action taken at depth k), so
    the action to execute now is simply bit 0 of the best survivor.

    NOTE(review): `params` (the identified-parameter tuple) is accepted for
    interface compatibility but unused — the rollout uses the module-level
    `new_params` dict instead; confirm this is intended.

    Removed in this revision: dead locals (`dt`, the params unpack,
    `min_cost_total`, `should_action`, `action_all`) that were never read.
    """
    search_depth = 15
    beam_width = 30
    beam = [{"state": state, "step": 0, "value": get_value(state)}]
    for depth in range(search_depth):
        candidates = []
        for info in beam:
            for action in range(2):
                nxt = cartpole_step(info["state"], action, new_params)
                candidates.append({
                    "state": nxt,
                    # Record this action in bit `depth` of the sequence mask.
                    "step": info["step"] + (1 << depth) * action,
                    "value": get_value(nxt),
                })
        # Stable sort + slice keeps the same survivors as the original.
        candidates.sort(key=lambda c: c["value"])
        beam = candidates[:beam_width]

    best = min(beam, key=lambda c: c["value"])
    return best["step"] % 2

# MPC controller: optimizes a continuous force sequence with scipy SLSQP,
# then executes the sign of the first optimized force as a discrete action.
def mpc_controller(state, params, horizon=10):
    """Model-predictive controller for CartPole.

    Rolls the identified model forward `horizon` steps, minimizing a
    quadratic state/effort cost subject to position, angle and
    angular-velocity constraints, and returns 1 if the first optimized
    force is non-negative, else 0 (0 also on optimizer failure).

    NOTE(review): verify that cartpole_dynamics accepts continuous forces —
    if it binarizes its action argument (anything != 1 becoming -10 N), the
    continuous forces optimized here never reach the model as intended,
    which would explain this controller underperforming.
    """
    dt = 0.02
    M, m, l, b_cart, b_pole = params

    def cost(f_seq):
        # Total predicted cost of applying force sequence f_seq from `state`.
        cost_total = 0.0
        current_state = np.copy(state)
        for f in f_seq:
            x, x_dot, theta, theta_dot = current_state
            # Compute accelerations from the identified dynamics model.
            x_ddot, theta_ddot = cartpole_dynamics(params, current_state, f)
            
            # Update the state (positions include the 0.5*a*dt^2 term).
            x_new = x + x_dot*dt + 0.5*x_ddot*dt**2
            x_dot_new = x_dot + x_ddot*dt
            theta_new = theta + theta_dot*dt + 0.5*theta_ddot*dt**2
            theta_dot_new = theta_dot + theta_ddot*dt
            current_state = [x_new, x_dot_new, theta_new, theta_dot_new]
            # Accumulate the running cost.
            cost_total += (
                theta_new**2 + 0.001 * (x_new)**2 +  # angle and position cost
                0.1*theta_dot_new**2 + 0.01*x_dot_new**2 +  # angular velocity and cart velocity cost
                0.01*f**2 +  # control-effort cost
                100 * max(0, np.abs(x_new) - 2.4) +  # soft penalty for leaving the track (|x| > 2.4)
                100 * max(0, np.abs(theta_new) - 0.2)  # soft penalty for large tilt (|theta| > 0.2 rad)
            )
            #cost_total += get_value(current_state) + 0.01*f**2
        return cost_total

    def state_constraints(f_seq):
        # Roll the model forward and record per-step x, theta, theta_dot so
        # the SLSQP inequality constraints below can bound each of them.
        current_state = np.copy(state)
        x_constraints = []
        theta_constraints = []
        theta_dot_constraints = []
        for f in f_seq:
            x, x_dot, theta, theta_dot = current_state
            x_ddot, theta_ddot = cartpole_dynamics(params, current_state, f)
            x_new = x + x_dot*dt + 0.5*x_ddot*dt**2
            theta_new = theta + theta_dot*dt + 0.5*theta_ddot*dt**2
            theta_dot_new = theta_dot + theta_ddot*dt
            x_constraints.append(x_new)  # position at each step
            theta_constraints.append(theta_new)  # angle at each step
            theta_dot_constraints.append(theta_dot_new)  # angular velocity at each step
            # Advance the state.
            x_dot_new = x_dot + x_ddot*dt
            current_state = [x_new, x_dot_new, theta_new, theta_dot_new]
        return np.array(x_constraints), np.array(theta_constraints), np.array(theta_dot_constraints)

    # Optimize the control (force) sequence.
    bounds = [(-10, 10)] * horizon  # allowable force range
    constraints = [
        {
            'type': 'ineq',
            # Position constraint: |x| <= 0.3 — much tighter than the env's
            # 2.4 track limit (the original comment claiming 2.4 was wrong).
            'fun': lambda f_seq: 0.3 - np.abs(state_constraints(f_seq)[0])
        },
        {
            'type': 'ineq',
            'fun': lambda f_seq: 0.2 - np.abs(state_constraints(f_seq)[1])  # angle constraint |theta| <= 0.2 rad (~12 deg)
        },
        {
            'type': 'ineq',
            'fun': lambda f_seq: 1.0 - np.abs(state_constraints(f_seq)[2])  # angular-velocity constraint |theta_dot| <= 1.0
        }
    ]
    res = minimize(cost, np.zeros(horizon), method='SLSQP', bounds=bounds, constraints=constraints)
    if not res.success:
        print("not success")
        return 0  # fallback action when the optimizer fails
    optimal_f = res.x[0]
    return 1 if optimal_f >= 0 else 0

# Run the chosen controller in closed loop on CartPole-v1.
#env = gym.make("CartPole-v1", render_mode="human")
env = gym.make("CartPole-v1")

obs, info = env.reset()
total_reward = 0
while True:
    # The scipy-based MPC (previously AI-generated) underperforms:
    #action = mpc_controller(obs, estimated_params)
    # The hand-tuned sampling/beam-search controller works:
    action = sample_controller(obs, estimated_params)
    # BUG FIX: gymnasium's step() returns (obs, reward, terminated,
    # truncated, info); the original discarded `truncated`, so the loop
    # never ended at the 500-step time limit for a successful controller.
    obs, reward, terminated, truncated, _ = env.step(action)
    total_reward += reward
    # Periodic progress print (roughly every 100 reward units).
    if (int(total_reward) % 100 == 0) :
        print(obs)
    env.render()
    if terminated or truncated:
        break

print("Total Reward:", total_reward)
env.close()