import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error

# 1. Load data
# limit.csv layout: index rows "下限" (lower bound) / "上限" (upper bound),
# one column per model parameter.
limit_data = pd.read_csv("./data/limit.csv", index_col=0)
source_data = pd.read_csv("./data/source.csv")

# Parameter names are the COLUMNS of the limit table; its index holds the
# bound labels (as `limit_data.loc["下限"]` below demonstrates). The previous
# `limit_data.index.tolist()` returned the two row labels instead of the
# parameter names, corrupting the saved output later. (bug fix)
param_names = limit_data.columns.tolist()
lower_limits = limit_data.loc["下限"].values
upper_limits = limit_data.loc["上限"].values

# 2. Preprocessing
# Inputs: catchment area, Sr, Ks, evaporation, 24h rainfall; target: peak flow.
X = source_data[["集水面积", "Sr", "Ks", "蒸发量", "降雨量(24h)"]].values
y = source_data["洪峰流量"].values

# 3. Build the polynomial model (terms up to degree 3)
poly = PolynomialFeatures(degree=3)
X_poly = poly.fit_transform(X)

# 4. Align the bound arrays with the number of polynomial coefficients.
num_params = X_poly.shape[1]  # number of polynomial feature columns
if len(lower_limits) < num_params:
    # Repeat the last known bound so every coefficient has a limit.
    lower_limits = np.pad(lower_limits, (0, num_params - len(lower_limits)), "edge")
if len(upper_limits) < num_params:
    upper_limits = np.pad(upper_limits, (0, num_params - len(upper_limits)), "edge")
# Drop any surplus bounds so the arrays broadcast against `num_params`
# coefficients (np.random.uniform below would otherwise fail).
lower_limits = lower_limits[:num_params]
upper_limits = upper_limits[:num_params]

# Random initial coefficients drawn uniformly inside the bounds, so the
# starting point is already feasible.
params = np.random.uniform(lower_limits, upper_limits, size=num_params)


# 计算多项式模型的输出
def model(params, X_poly):
    """Evaluate the linear-in-features model: predictions = X_poly · params."""
    return X_poly @ params


# 计算损失函数（MSE）
def mse(params, X_poly, y):
    """Return the mean-squared error of the linear model on (X_poly, y).

    Parameters
    ----------
    params : (n_params,) coefficient vector.
    X_poly : (n_samples, n_params) design matrix.
    y : (n_samples,) observed targets.

    Computed directly with numpy — identical to
    sklearn.metrics.mean_squared_error, but with no dependency on sklearn
    (or on the sibling `model` helper) for a one-line formula.
    """
    residuals = y - np.dot(X_poly, params)
    return np.mean(residuals**2)


# 梯度下降更新规则
def gradient_descent(
    X_poly,
    y,
    params,
    learning_rate=0.000001,
    n_iterations=10000,
    lower=None,
    upper=None,
):
    """Box-constrained gradient descent on the MSE of a linear model.

    Parameters
    ----------
    X_poly : (n_samples, n_params) design matrix.
    y : (n_samples,) target vector.
    params : (n_params,) initial coefficients; the caller's array is NOT
        modified (the previous version updated it in place via `-=`).
    learning_rate : step size.
    n_iterations : number of update steps.
    lower, upper : per-coefficient bounds; when omitted they fall back to
        the module-level ``lower_limits`` / ``upper_limits`` so existing
        call sites keep working unchanged.

    Returns
    -------
    (params, losses) : the optimized coefficients and the per-iteration MSE.
    """
    # Backward-compatible fallback to the script's global bounds.
    if lower is None:
        lower = lower_limits
    if upper is None:
        upper = upper_limits

    # Work on a float copy so the caller's array is never mutated.
    params = np.array(params, dtype=float)

    losses = []
    n_samples = len(y)
    for iteration in range(n_iterations):
        y_pred = np.dot(X_poly, params)
        # d(MSE)/d(params) = -2 * X^T (y - y_pred) / n
        gradient = -2 * np.dot(X_poly.T, (y - y_pred)) / n_samples
        params -= learning_rate * gradient
        # Project back into the feasible box BEFORE recording the loss, so
        # the reported loss matches the parameters actually kept (the old
        # code measured the loss on the unclipped parameters).
        params = np.clip(params, lower, upper)
        loss = np.mean((y - np.dot(X_poly, params)) ** 2)
        losses.append(loss)

        if iteration % 100 == 0:
            print(f"Iteration {iteration}, MSE: {loss}")

    return params, losses


# Run the constrained gradient descent on the polynomial features.
optimized_params, losses = gradient_descent(X_poly, y, params)

# 5. Plot how the MSE decreased over the iterations.
plt.plot(losses)
plt.xlabel("Iterations")
plt.ylabel("Loss (MSE)")
plt.title("Loss Convergence during Gradient Descent")
plt.show()

# 6. Persist the optimized coefficients, one named column per parameter.
optimized_params_dict = {
    name: optimized_params[i] for i, name in enumerate(param_names)
}
optimized_params_df = pd.DataFrame(optimized_params_dict, index=[0])
optimized_params_df.to_csv("./optimized_params.csv", index=False)

# 7. Report the final fit quality.
final_mse = mse(optimized_params, X_poly, y)
print(f"Final MSE: {final_mse}")

# 8. Write the fitted polynomial out as a human-readable formula.
terms = [f"{coef:.4f} * x^{i}" for i, coef in enumerate(optimized_params)]
model_formula = "y = " + " + ".join(terms)
with open("./model_formula.txt", "w") as f:
    f.write(model_formula)

print("Optimized parameters and model formula have been saved.")
