import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Load training data: five input features and the observed peak-flow target.
# (Column names are Chinese headers from the source CSV; do not translate them.)
data_df = pd.read_csv("./data/source.csv")
feature_columns = ["集水面积", "Sr", "Ks", "蒸发量", "降雨量(24h)"]
inputs = data_df[feature_columns].to_numpy()
true_outputs = data_df["洪峰流量"].to_numpy()

# Load per-parameter bounds: row 0 holds lower limits, row 1 upper limits.
limit_df = pd.read_csv("./data/limit.csv", index_col=0)
lower_limit = limit_df.iloc[0].to_numpy()
upper_limit = limit_df.iloc[1].to_numpy()

# Names of the 33 hydrological model parameters, in the order expected by
# the bounds file and the exported CSV.
parameter_names = (
    "B alpha0 JL KKS KKSS CS "
    "z1 z2 z3 ni1 ni2 ni3 "
    "H01 H02 H03 Hc1 Hc2 Hc3 Hr1 Hr2 Hr3 "
    "delta1 delta2 delta3 "
    "LS LSS L Gam1 Gam2 Gam3 EC KKG LG"
).split()

# Normalization is intentionally disabled here (see the commented-out
# MinMaxScaler import at the top of the file).

# Convert to float32 tensors; the target becomes an (N, 1) column vector.
X: torch.Tensor = torch.tensor(inputs, dtype=torch.float32)
y = torch.tensor(true_outputs, dtype=torch.float32).reshape(-1, 1)

# Hold out 20% of the samples for validation; fixed seed for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.2, random_state=42
)


# 定义神经网络模型
class MLP(nn.Module):
    """Fully connected regressor: 5 input features -> 64 -> 64 -> 1 output.

    ReLU + dropout (p=0.1) after each hidden layer; batch normalization is
    applied to the second hidden representation just before the output layer.
    Attribute names (fc1, dropout1, ...) are part of the saved state_dict
    layout and must not change.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(5, 64)        # input layer -> hidden 1
        self.dropout1 = nn.Dropout(p=0.1)  # regularize hidden 1
        self.fc2 = nn.Linear(64, 64)       # hidden 1 -> hidden 2
        self.dropout2 = nn.Dropout(p=0.1)  # regularize hidden 2
        self.bn1 = nn.BatchNorm1d(64)      # normalize before the output layer
        self.fc3 = nn.Linear(64, 1)        # hidden 2 -> scalar prediction

    def forward(self, x):
        hidden = self.dropout1(torch.relu(self.fc1(x)))
        hidden = self.dropout2(torch.relu(self.fc2(hidden)))
        return self.fc3(self.bn1(hidden))


# Instantiate the network.
model = MLP()

# MSE loss and Adam with L2 regularization (weight decay).
criterion = nn.MSELoss()
optimizer = optim.Adam(
    model.parameters(), lr=0.0005, weight_decay=0.01
)  # weight_decay sets the L2 regularization strength

# Training configuration and early-stopping state.
num_epochs = 100000
loss_values = []  # per-epoch training loss, recorded for plotting
best_mse = float("inf")  # best validation MSE observed so far
patience = 500  # epochs without improvement allowed before stopping
trigger_times = 0  # consecutive epochs without improvement

# Training loop: full-batch gradient steps with early stopping on the
# validation MSE. The best-performing weights are checkpointed to disk.
for epoch in range(num_epochs):
    model.train()  # enable dropout / batch-norm batch statistics
    # Forward pass on the whole training split (full-batch training).
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Backward pass and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Record training loss for the plot below.
    loss_values.append(loss.item())

    # Periodic progress report.
    if (epoch + 1) % 100 == 0:
        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")

    # Validation pass (dropout off, batch-norm uses running stats).
    model.eval()
    with torch.no_grad():
        # .item() keeps best_mse as a plain float rather than a tensor.
        val_loss = criterion(model(X_val), y_val).item()

    # Early-stopping bookkeeping: checkpoint on improvement, otherwise
    # count strikes until patience runs out.
    if val_loss < best_mse:
        best_mse = val_loss
        torch.save(model.state_dict(), "./output/best_model.pth")
        print(f"Best model saved at epoch {epoch + 1} with val MSE: {best_mse:.4f}")
        trigger_times = 0
    else:
        trigger_times += 1
        if trigger_times >= patience:
            print("Early stopping triggered")
            break

# Bug fix: restore the best checkpoint so downstream evaluation and parameter
# export use the best model, not whatever state the loop happened to end in.
# The checkpoint always exists: the first epoch improves on inf and saves it.
model.load_state_dict(torch.load("./output/best_model.pth"))

# Visualize how the training loss decreased over the run.
fig, ax = plt.subplots()
ax.plot(loss_values)
ax.set_title("Loss Function (MSE) Over Iterations with Regularization and Dropout")
ax.set_xlabel("Iterations")
ax.set_ylabel("MSE")
ax.grid(True)
plt.show()

# Predict peak flow for the full dataset (dropout disabled, BN in eval mode).
model.eval()
with torch.no_grad():
    predictions = model(X)

# Full-dataset mean squared error against the observed peak flows.
y_true_np = y.numpy()
y_pred_np = predictions.numpy()
mse = mean_squared_error(y_true_np, y_pred_np)
print(f"Optimized MSE with Regularization and Dropout: {mse:.4f}")

# ---- Export "optimized" parameter values as a CSV ----
# Flatten every tensor in the state dict into one 1-D vector.
# .detach().cpu() makes the conversion safe even if the model was moved to a
# GPU (the original bare .numpy() would raise in that case).
optimized_params = model.state_dict()
optimized_param_list = [
    param.detach().cpu().numpy().flatten() for param in optimized_params.values()
]
optimized_params_flattened = np.concatenate(optimized_param_list)

# Clip the first 33 values into the per-parameter [lower, upper] bounds.
# NOTE(review): this maps raw network weights onto the 33 named hydrological
# parameters; the weights have no intrinsic correspondence to those physical
# quantities — confirm this mapping is intentional.
optimized_params_clipped = np.clip(
    optimized_params_flattened[:33], lower_limit, upper_limit
)

# Write the parameter name / value table.
optimized_param_df = pd.DataFrame(
    {
        "Parameter": parameter_names,
        "Optimized Value": optimized_params_clipped,
    }
)

output_filename = "./output/optimized_parameters_with_dropout.csv"
optimized_param_df.to_csv(output_filename, index=False)
print(f"Optimized parameters with Dropout saved to {output_filename}")
