import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error

# 洛伦兹方程的函数定义
def lorenz(x, y, z, sigma=10, r=28, b=8/3):
    dx = -sigma * x + sigma * y
    dy = -x * z + r * x - y
    dz = x * y - b * z
    return dx, dy, dz

# 使用4阶Runge-Kutta方法生成洛伦兹吸引子的时间序列数据
def generate_lorenz_data(timesteps, dt=0.01, x0=1.0, y0=1.0, z0=1.0):
    """Integrate the Lorenz system with the classic 4th-order Runge-Kutta method.

    Parameters
    ----------
    timesteps : int
        Number of integration steps (rows of the returned array).
    dt : float
        Time step size.
    x0, y0, z0 : float
        Initial condition (defaults preserve the original (1, 1, 1)).

    Returns
    -------
    np.ndarray of shape (timesteps, 3)
        The (x, y, z) state after each step; the initial state itself
        is not included.
    """
    x, y, z = x0, y0, z0
    # Preallocate instead of appending to a Python list.
    data = np.empty((timesteps, 3))
    for i in range(timesteps):
        # Four RK4 slope evaluations: at the start, twice at the midpoint,
        # and at the end of the step.
        dx1, dy1, dz1 = lorenz(x, y, z)
        dx2, dy2, dz2 = lorenz(x + dx1*dt/2, y + dy1*dt/2, z + dz1*dt/2)
        dx3, dy3, dz3 = lorenz(x + dx2*dt/2, y + dy2*dt/2, z + dz2*dt/2)
        dx4, dy4, dz4 = lorenz(x + dx3*dt, y + dy3*dt, z + dz3*dt)

        # Weighted average of the slopes advances the state by dt.
        x += (dx1 + 2*dx2 + 2*dx3 + dx4) * dt / 6
        y += (dy1 + 2*dy2 + 2*dy3 + dy4) * dt / 6
        z += (dz1 + 2*dz2 + 2*dz3 + dz4) * dt / 6
        data[i] = (x, y, z)
    return data

# Generate the Lorenz time series.
timesteps = 1500
data = generate_lorenz_data(timesteps)

# First 700 samples train the model; the remaining 800 evaluate it.
train_data, test_data = data[:700], data[700:1500]

# Inputs are the (x, y) columns; the target is the z column.
X_train, y_train = train_data[:, :2], train_data[:, 2]
X_test, y_test = test_data[:, :2], test_data[:, 2]

# Multilayer-perceptron regressor: one hidden layer of 200 units, trained
# with adam for up to 100 epochs.
mlp = MLPRegressor(hidden_layer_sizes=(200,), max_iter=100, learning_rate_init=0.1, alpha=0.0001, solver='adam')

# Fit ONCE. The original code re-fit the model from scratch for every
# max_iter in 1..50, which (a) threw away this 100-epoch fit, (b) cost
# O(sum of epochs) redundant training, and (c) left the 50-epoch model as
# the one used for prediction. sklearn already records the per-epoch
# training loss in `loss_curve_`, so a single fit gives the whole curve.
mlp.fit(X_train, y_train)

# Final training error of the fitted model.
train_mse = mean_squared_error(y_train, mlp.predict(X_train))

# Plot the learning curve recorded during the single fit
# (loss_curve_ holds one squared-loss value per completed epoch).
plt.plot(range(1, len(mlp.loss_curve_) + 1), mlp.loss_curve_, label="Training Error")
plt.xlabel("Epochs")
plt.ylabel("Mean Squared Error")
plt.title("Training MSE vs Epochs")
plt.show()

# Predict the z component for the held-out time steps.
y_pred = mlp.predict(X_test)

# Overlay the true and predicted z trajectories for visual comparison.
for series, tag in ((y_test, "True Values"), (y_pred, "Predicted Values")):
    plt.plot(series, label=tag)
plt.xlabel("Time Step")
plt.ylabel("Z Value")
plt.title("Prediction vs Actual Lorenz Attractor")
plt.legend()
plt.show()
