# 编写训练算法的python代码
# BP神经网络模型
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from scipy.stats import pearsonr
from matplotlib.font_manager import FontProperties


from tqdm import tqdm  # 进度条（可选）
from bayes_opt import BayesianOptimization

# Font used so matplotlib can render the CJK labels in this script
# (Microsoft YaHei). NOTE(review): hardcoded Windows path — this breaks
# on non-Windows hosts; confirm the deployment target.
font_path = 'C:/Windows/Fonts/msyh.ttc'
font_prop = FontProperties(fname=font_path)


# 计算相关系数（皮尔逊相关系数）

# corr_inlet_outlet, _ = pearsonr(data['进水浊度(NTU)'], data['出水浊度(NTU)'])
# corr_pac_outlet, _ = pearsonr(data['PAC单耗(mg/L)'], data['出水浊度(NTU)'])
# print(f"\n进水浊度与出水浊度相关系数：{corr_inlet_outlet:.2f}")
# print(f"PAC单耗与出水浊度相关系数：{corr_pac_outlet:.2f}")
def BP_analysis(data):
    """Train and evaluate a BP (feed-forward) neural network that maps
    inlet turbidity to (outlet turbidity, PAC dosage).

    Workflow: scale the data, load a previously saved model if one exists
    (otherwise build and train a new one), export a prediction sweep to
    'BP_pred_Data.xlsx', then print test-set metrics and show diagnostic
    plots.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain the columns '进水浊度(NTU)', '出水浊度(NTU)' and
        'PAC单耗(mg/L)'.

    Returns
    -------
    None. Results are printed, plotted, and written to Excel.
    """
    # ---------------------- 1. Data preprocessing ----------------------
    # X: single input feature (inlet turbidity); y: two regression targets.
    X = data['进水浊度(NTU)'].values
    y = data[['出水浊度(NTU)', 'PAC单耗(mg/L)']].values

    # 80% train / 20% test, fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    # Min-max scaling — neural networks are sensitive to feature scale.
    scaler_X = MinMaxScaler()
    scaler_y = MinMaxScaler()
    X_train_scaled = scaler_X.fit_transform(X_train.reshape(-1, 1))
    X_test_scaled = scaler_X.transform(X_test.reshape(-1, 1))
    y_train_scaled = scaler_y.fit_transform(y_train)
    y_test_scaled = scaler_y.transform(y_test)

    # ---------------------- 2. Load or build + train ----------------------
    # BUG FIX: the original called Sequential('BP_model.h5'), which does NOT
    # load a saved model (Sequential expects a list of layers and raises),
    # so the load always failed and the model was always rebuilt anyway.
    # load_model() is the correct API; on failure we fall back to training.
    model = None
    history = None
    try:
        from tensorflow.keras.models import load_model  # local: only needed here
        model = load_model('BP_model.h5')
        print("模型加载成功！")
    except (OSError, ValueError, ImportError):
        print("模型加载失败！")

    if model is None:
        model = _build_bp_model()
        model.summary()
        history = model.fit(
            X_train_scaled, y_train_scaled,
            epochs=100,            # training epochs
            batch_size=32,
            validation_split=0.2,  # hold out 20% of the training set
            verbose=1,             # 0=silent, 1=progress bar, 2=one line/epoch
        )
        # model.save('BP_model.h5')  # uncomment to persist the trained model

    # ---------------------- 3. Prediction sweep → Excel ----------------------
    # Predict both outputs for inlet turbidity 0..3195 in steps of 5.
    x1_new = np.arange(0, 3200, 5)
    X_new_scaled = scaler_X.transform(x1_new.reshape(-1, 1))
    y_new_pred = scaler_y.inverse_transform(model.predict(X_new_scaled))

    BP_pred_Data = np.hstack((x1_new.reshape(-1, 1), y_new_pred))
    df = pd.DataFrame(BP_pred_Data, columns=['org_Tur', 'Out_Tur', 'PAC_min'])
    df.to_excel('BP_pred_Data.xlsx', index=False, engine='openpyxl')

    # Training-loss curve — only available when the model was trained here.
    if history is not None:
        plt.figure(figsize=(8, 4))
        plt.plot(history.history['loss'], label='training loss (MSE)')
        plt.plot(history.history['val_loss'], label='validation loss (MSE)')
        plt.xlabel('trainNum (Epochs)')
        plt.ylabel('MSE')
        plt.title('model loss')
        plt.legend()
        plt.show()

    # ---------------------- 4. Model evaluation ----------------------
    test_loss, test_mae = model.evaluate(X_test_scaled, y_test_scaled, verbose=0)
    print(f"\n测试集评估结果：")
    print(f"均方误差 (MSE): {test_loss:.4f}")
    print(f"平均绝对误差 (MAE): {test_mae:.4f}")

    # Back to the original scale for interpretable plots.
    y_pred = scaler_y.inverse_transform(model.predict(X_test_scaled))
    y_test_real = scaler_y.inverse_transform(y_test_scaled)

    # Predicted vs. actual scatter with the ideal y = x reference line.
    plt.figure(figsize=(8, 4))
    plt.scatter(y_test_real, y_pred, alpha=0.6)
    plt.plot([y_test_real.min(), y_test_real.max()],
             [y_test_real.min(), y_test_real.max()], 'r--')
    plt.xlabel('pacNum')
    plt.ylabel('pre_pacNum')
    plt.title('testpacNum vs pacNum')
    plt.grid(True)
    plt.show()

    # Residual histogram. NOTE(review): flatten() pools residuals of BOTH
    # targets (NTU and mg/L) into one distribution — confirm that mixing
    # the two units here is intended.
    residuals = y_test_real.flatten() - y_pred.flatten()
    plt.figure(figsize=(8, 4))
    plt.hist(residuals, bins=30, edgecolor='black')
    plt.xlabel('residuals (NTU)')
    plt.ylabel('number')
    plt.title('pre_residuals')
    plt.show()


def _build_bp_model():
    """Build and compile the BP regression net: 1 input → 16 → 16 → 2 outputs."""
    model = Sequential([
        Dense(16, activation='relu', input_shape=(1,)),  # hidden layer 1 (ReLU)
        Dense(16, activation='relu'),                    # hidden layer 2 (ReLU)
        Dense(2),                                        # linear output: 2 regression targets
    ])
    model.compile(
        optimizer='adam',                  # adaptive learning rate
        loss='mean_squared_error',         # regression loss (MSE)
        metrics=['mean_absolute_error'],   # report MAE alongside
    )
    return model