{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "48776c61",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 556\n",
      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 76ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n",
      "RMSEc (校正均方根误差): 9.313228998891038\n",
      "RMSEp (预测均方根误差): 11.811557457016724\n",
      "Rcal (校正集相关系数): 0.9828395909200296\n",
      "Rval (验证集相关系数): 0.9580986342555358\n",
      "RPD (相对预测偏差): 3.488020660015068\n",
      "Training time: 70.92226362228394 seconds\n",
      "Testing time: 0.20054101943969727 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D  # 导入 MaxPooling1D\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 345)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建CNN模型\n",
    "def CNN_model(input_shape, output_shape):\n",
    "    model = Sequential()\n",
    "    model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=input_shape))\n",
    "    model.add(MaxPooling1D(pool_size=2))  # 使用导入的 MaxPooling1D\n",
    "    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\n",
    "    model.add(MaxPooling1D(pool_size=2))\n",
    "    model.add(Flatten())\n",
    "    model.add(Dense(64, activation='relu'))\n",
    "    model.add(Dense(output_shape))\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "model_clstm = CNN_model(X_train.shape[1:], 1)\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_clstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_clstm = model_clstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_clstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_clstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d87c6dcb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(**kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 557\n",
      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 103ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n",
      "RMSEc (校正均方根误差): 6.068335909994017\n",
      "RMSEp (预测均方根误差): 12.832848968981251\n",
      "Rcal (校正集相关系数): 0.9930569472205788\n",
      "Rval (验证集相关系数): 0.9525652627446468\n",
      "RPD (相对预测偏差): 3.2104294639960993\n",
      "Training time: 67.85579323768616 seconds\n",
      "Testing time: 0.2540748119354248 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D  # 导入 MaxPooling1D\n",
    "from tensorflow.keras.layers import LSTM, Flatten  # 导入 Flatten 和 LSTM\n",
    "from tensorflow.keras.layers import SimpleRNN\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 345)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建RNN模型\n",
    "def RNN_model(input_shape, output_shape):\n",
    "    model = Sequential()\n",
    "    # 对于 SimpleRNN，需要指定时间步长和特征数\n",
    "    # 如果您的时间步长是可变的，可以设置为 None\n",
    "    # 假设 input_shape 是 (samples, timesteps, features)\n",
    "    model.add(SimpleRNN(128, input_shape=input_shape, activation='relu'))\n",
    "    model.add(Dense(output_shape))\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "\n",
    "# 创建RNN模型\n",
    "model_rnn = RNN_model(X_train.shape[1:], 1)\n",
    "\n",
    "# 训练RNN模型\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_rnn.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_rnn = model_rnn.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_rnn.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_rnn)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "396c1297",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(**kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 662\n",
      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 234ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n",
      "RMSEc (校正均方根误差): 6.4811267993593376\n",
      "RMSEp (预测均方根误差): 5.943985094443609\n",
      "Rcal (校正集相关系数): 0.9915610188465539\n",
      "Rval (验证集相关系数): 0.9902351130733497\n",
      "RPD (相对预测偏差): 6.9312011693201985\n",
      "Training time: 92.81368660926819 seconds\n",
      "Testing time: 0.5081334114074707 seconds\n"
     ]
    }
   ],
   "source": [
    "#6：1\n",
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D  # 导入 MaxPooling1D\n",
    "from tensorflow.keras.layers import LSTM, Flatten  # 导入 Flatten 和 LSTM\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 345)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建LSTM模型\n",
    "def LSTM_model(input_shape, output_shape):\n",
    "    model = Sequential()\n",
    "    model.add(LSTM(128, input_shape=(1, 10), return_sequences=True))  # 修正 input_shape\n",
    "    model.add(LSTM(128))\n",
    "    model.add(Dense(output_shape))\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "\n",
    "# 调整数据形状以匹配LSTM输入要求\n",
    "# LSTM需要三维输入，因此需要添加一个时间步长维度\n",
    "X_train = np.expand_dims(X_train, axis=1)  # 现在形状是(samples, 1, features)\n",
    "X_test = np.expand_dims(X_test, axis=1)    # 现在形状是(samples, 1, features)\n",
    "\n",
    "# 创建LSTM模型\n",
    "model_lstm = LSTM_model(X_train.shape[1:], 1)\n",
    "\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_lstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_lstm = model_lstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_lstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_lstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
