# %%
import numpy as np
import matplotlib.pyplot as plt

# %%
# 洛伦兹方程的函数定义
def lorenz(x, y, z, sigma=10, r=28, b=8/3):
    """Right-hand side of the Lorenz system.

    Given the current state (x, y, z) and parameters sigma, r, b
    (defaults are the classic chaotic regime), return the time
    derivatives (dx/dt, dy/dt, dz/dt).
    """
    x_dot = -sigma * x + sigma * y
    y_dot = -x * z + r * x - y
    z_dot = x * y - b * z
    return x_dot, y_dot, z_dot

# Generate Lorenz-attractor time series with the classical 4th-order Runge-Kutta method.
def generate_lorenz_data(timesteps, dt=0.01):
    """Integrate the Lorenz system from (1, 1, 1) and return the trajectory.

    The original code claimed RK4 in its comment but performed a single
    forward-Euler step (`x += dx1 * dt`); this implements the classical
    RK4 scheme the comment describes.

    Parameters
    ----------
    timesteps : int
        Number of integration steps (rows of the returned array).
    dt : float, optional
        Integration step size.

    Returns
    -------
    np.ndarray
        Array of shape (timesteps, 3) with columns (x, y, z); the initial
        condition itself is not included.
    """
    x, y, z = 1.0, 1.0, 1.0  # initial condition
    data = []
    for _ in range(timesteps):
        # Classical RK4: four slope evaluations per step.
        dx1, dy1, dz1 = lorenz(x, y, z)
        dx2, dy2, dz2 = lorenz(x + 0.5 * dt * dx1, y + 0.5 * dt * dy1, z + 0.5 * dt * dz1)
        dx3, dy3, dz3 = lorenz(x + 0.5 * dt * dx2, y + 0.5 * dt * dy2, z + 0.5 * dt * dz2)
        dx4, dy4, dz4 = lorenz(x + dt * dx3, y + dt * dy3, z + dt * dz3)
        x += (dt / 6.0) * (dx1 + 2 * dx2 + 2 * dx3 + dx4)
        y += (dt / 6.0) * (dy1 + 2 * dy2 + 2 * dy3 + dy4)
        z += (dt / 6.0) * (dz1 + 2 * dz2 + 2 * dz3 + dz4)
        data.append([x, y, z])
    return np.array(data)

# Generate the trajectory: 1500 integration steps of the Lorenz system
timesteps = 1500
data = generate_lorenz_data(timesteps)

# %%
# 3D view of the trajectory, colored by the train/test split used below.
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')

split = 700  # first 700 steps are training data, the rest testing data
for segment, color, label in ((slice(None, split), 'blue', 'Training Data'),
                              (slice(split, None), 'red', 'Testing Data')):
    ax.plot(data[segment, 0], data[segment, 1], data[segment, 2],
            lw=1, color=color, label=label)

ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_title('3D Lorenz Attractor')
ax.legend()
plt.show()

# %%
# Build input features: each sample is the PAST 20 time steps of one
# coordinate, zero-padded where fewer than 20 past points exist.
# NOTE: the original code prepended only 19 zero rows, so the window for
# sample k covered steps k-19..k — i.e. the target data[k] was the last
# element of its own input (target leakage; the model could learn the
# identity map).  Padding 20 rows makes the window cover steps k-20..k-1.
data_extended = np.vstack([np.zeros((20, 3)), data])  # prepend 20 rows of zeros
X_new = np.zeros((len(data), 20))    # x-coordinate windows
X_new_y = np.zeros((len(data), 20))  # y-coordinate windows
X_new_z = np.zeros((len(data), 20))  # z-coordinate windows
for k in range(len(data)):
    # Padded rows k..k+19 correspond to original steps k-20..k-1.
    X_new[k] = data_extended[k:k+20, 0]
    X_new_y[k] = data_extended[k:k+20, 1]
    X_new_z[k] = data_extended[k:k+20, 2]

# Train/test split: first 700 steps for training, the remainder for testing
X_train_x = X_new[:700]
X_train_y = X_new_y[:700]
X_train_z = X_new_z[:700]

y_train_x = data[:700, 0]
y_train_y = data[:700, 1]
y_train_z = data[:700, 2]

X_test_x = X_new[700:]
X_test_y = X_new_y[700:]
X_test_z = X_new_z[700:]

y_test_x = data[700:, 0]
y_test_y = data[700:, 1]
y_test_z = data[700:, 2]

print(X_train_z.shape)

# %% [markdown]
# MLP算法的实现 （激活函数为relu或者sigmoid函数）

# %%
class MLPRegressorManual:
    """Single-hidden-layer MLP regressor implemented with NumPy.

    Architecture: ReLU hidden layer, linear scalar output.  Trained with
    full-batch Adam and a linearly annealed learning rate.  The loss is
    mean squared error; an L2 penalty (coefficient ``alpha``) enters the
    training through the weight gradients.
    """

    def __init__(self, hidden_layer_sizes=200, max_iter=100, learning_rate_init=0.1, alpha=0.0001):
        self.hidden_size = hidden_layer_sizes
        self.max_iter = max_iter
        # Fix: the anneal schedule previously started from a hardcoded 0.1,
        # silently ignoring learning_rate_init.  Keep both the starting value
        # and the current value.
        self.lr_init = learning_rate_init  # anneal start
        self.lr = learning_rate_init       # current learning rate
        self.alpha = alpha                 # L2 regularization coefficient
        self.W1, self.b1 = None, None      # input -> hidden weights / bias
        self.W2, self.b2 = None, None      # hidden -> output weights / bias
        self.train_errors = []             # per-epoch training MSE

    def _initialize_parameters(self, n_features):
        # He initialization — appropriate for ReLU hidden units.
        self.W1 = np.random.randn(n_features, self.hidden_size) * np.sqrt(2. / n_features)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, 1) * np.sqrt(2. / self.hidden_size)
        self.b2 = np.zeros((1, 1))

    def _forward(self, X):
        """Forward pass; caches activations for the backward pass."""
        self.Z1 = X @ self.W1 + self.b1        # hidden pre-activation
        self.A1 = np.maximum(0, self.Z1)       # ReLU
        self.Z2 = self.A1 @ self.W2 + self.b2  # linear output layer
        return self.Z2

    def _backward(self, X, y):
        """Return gradients (dW1, db1, dW2, db2) of the regularized loss."""
        m = X.shape[0]
        # Output-layer error (gradient of 0.5*MSE; the factor is absorbed by lr).
        dZ2 = (self.Z2 - y) / m
        dW2 = self.A1.T @ dZ2 + (self.alpha / m) * self.W2
        db2 = np.sum(dZ2, axis=0, keepdims=True)

        # Hidden-layer gradients through the ReLU.
        dA1 = dZ2 @ self.W2.T
        dZ1 = dA1 * (self.Z1 > 0)  # ReLU derivative
        dW1 = X.T @ dZ1 + (self.alpha / m) * self.W1
        db1 = np.sum(dZ1, axis=0, keepdims=True)

        return dW1, db1, dW2, db2

    def _update_params_adam(self, grads, t, beta1=0.9, beta2=0.999, eps=1e-8):
        """Apply one Adam step to all four parameter tensors in place.

        ``t`` is the 1-based Adam time step used for bias correction.
        """
        dW1, db1, dW2, db2 = grads
        # Lazily create first/second moment estimates on the first call.
        if not hasattr(self, 'm_W1'):
            self.m_W1, self.v_W1 = np.zeros_like(self.W1), np.zeros_like(self.W1)
            self.m_b1, self.v_b1 = np.zeros_like(self.b1), np.zeros_like(self.b1)
            self.m_W2, self.v_W2 = np.zeros_like(self.W2), np.zeros_like(self.W2)
            self.m_b2, self.v_b2 = np.zeros_like(self.b2), np.zeros_like(self.b2)

        for param, grad, m, v in zip([self.W1, self.b1, self.W2, self.b2],
                                     [dW1, db1, dW2, db2],
                                     [self.m_W1, self.m_b1, self.m_W2, self.m_b2],
                                     [self.v_W1, self.v_b1, self.v_W2, self.v_b2]):
            m[:] = beta1 * m + (1 - beta1) * grad
            v[:] = beta2 * v + (1 - beta2) * (grad ** 2)
            m_hat = m / (1 - beta1 ** t)          # bias-corrected first moment
            v_hat = v / (1 - beta2 ** t)          # bias-corrected second moment
            param -= self.lr * m_hat / (np.sqrt(v_hat) + eps)
            # Fix: removed `print(param)` — it dumped every parameter tensor
            # on every epoch, flooding output and dominating runtime.

    def fit(self, X, y):
        """Train on X of shape (n_samples, n_features) and targets y of shape (n_samples,)."""
        y = y.reshape(-1, 1)
        self._initialize_parameters(X.shape[1])
        t = 0  # Adam time step

        for epoch in range(self.max_iter):
            # Linear learning-rate anneal from learning_rate_init down toward 1e-5.
            self.lr = self.lr_init - (self.lr_init - 1e-5) * (epoch / self.max_iter)

            y_pred = self._forward(X)

            # Record the (unregularized) training MSE; the L2 penalty only
            # affects the gradients computed in _backward.
            mse_loss = np.mean((y_pred - y) ** 2)
            self.train_errors.append(mse_loss)

            grads = self._backward(X, y)

            t += 1
            self._update_params_adam(grads, t)

    def predict(self, X):
        """Return predictions of shape (n_samples, 1)."""
        Z1 = X @ self.W1 + self.b1
        A1 = np.maximum(0, Z1)
        return A1 @ self.W2 + self.b2

# %%
# Train one single-output MLP per coordinate, with identical hyper-parameters.
# Assumes X_train_*, y_train_* have been prepared above.
hyperparams = dict(hidden_layer_sizes=200, max_iter=50, learning_rate_init=0.1, alpha=0.0001)

mlp_manual_x = MLPRegressorManual(**hyperparams)
mlp_manual_x.fit(X_train_x, y_train_x)

mlp_manual_y = MLPRegressorManual(**hyperparams)
mlp_manual_y.fit(X_train_y, y_train_y)

mlp_manual_z = MLPRegressorManual(**hyperparams)
mlp_manual_z.fit(X_train_z, y_train_z)



# %%

# Per-epoch training MSE for the x model
plt.plot(mlp_manual_x.train_errors, label="Training Error")
plt.xlabel("Epochs")
plt.ylabel("Mean Squared Error")
plt.title("Training MSE of X for each Epoch")
plt.show()

predictions_x = mlp_manual_x.predict(X_test_x)

# Compare predicted x against the true test-set values
plt.plot(y_test_x, label="True Values of X")
plt.plot(predictions_x, label="Predicted Values of X")
plt.xlabel("Time Step")
plt.ylabel("X Value")  # fixed: axis was mislabeled "Z Value" (copy-paste from the z plot)
plt.title("Prediction vs Actual Lorenz Attractor")
plt.legend()
plt.show()


# %%
# Per-epoch training MSE for the y model
plt.plot(mlp_manual_y.train_errors, label="Training Error")
plt.xlabel("Epochs")
plt.ylabel("Mean Squared Error")
plt.title("Training MSE of Y for each Epoch")
plt.show()

predictions_y = mlp_manual_y.predict(X_test_y)

# Compare predicted y against the true test-set values
plt.plot(y_test_y, label="True Values of Y")
plt.plot(predictions_y, label="Predicted Values of Y")
plt.xlabel("Time Step")
plt.ylabel("Y Value")  # fixed: axis was mislabeled "Z Value" (copy-paste from the z plot)
plt.title("Prediction vs Actual Lorenz Attractor")
plt.legend()
plt.show()

# %%
# Per-epoch training MSE for the z model
plt.plot(mlp_manual_z.train_errors, label="Training Error")
plt.xlabel("Epochs")
plt.ylabel("Mean Squared Error")
plt.title("Training MSE of Z for each Epoch")
plt.show()

predictions_z = mlp_manual_z.predict(X_test_z)

# Compare predicted z against the true test-set values
for series, series_label in ((y_test_z, "True Values of Z"),
                             (predictions_z, "Predicted Values of Z")):
    plt.plot(series, label=series_label)
plt.xlabel("Time Step")
plt.ylabel("Z Value")
plt.title("Prediction vs Actual Lorenz Attractor")
plt.legend()
plt.show()

# 3D comparison of the predicted and true test trajectories
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')

# Predicted trajectory (red line)
ax.plot(predictions_x, predictions_y, predictions_z,
        lw=1, c='r', label='Predictions')

# Ground-truth trajectory (blue line)
ax.plot(y_test_x, y_test_y, y_test_z,
        lw=1, c='b', label='Ground Truth')

# Axis labels and figure decoration
ax.set_xlabel('X Axis', fontsize=12, labelpad=15)
ax.set_ylabel('Y Axis', fontsize=12, labelpad=15)
ax.set_zlabel('Z Axis', fontsize=12, labelpad=15)
ax.set_title('3D Scatter Comparison: Predictions vs Ground Truth',
            fontsize=14, pad=20)
ax.legend(loc='upper right', fontsize=10)
ax.grid(True, alpha=0.5)

# Fix: removed a leftover `ax.scatter([], [], [], c=[], cmap='viridis')` —
# it created an empty artist for a colorbar that was never actually added.

plt.tight_layout()
plt.show()



