{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "eea0b7c5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "        番泻苷B\n",
      "0  65.048774\n",
      "1  62.287028\n",
      "2  65.272927\n",
      "3  75.099722\n",
      "4  78.549335\n",
      "特征集样本数量： 403\n",
      "目标变量集样本数量： 403\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 154ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 10.823559040224854\n",
      "RMSEp (预测均方根误差): 25.432007190714714\n",
      "Rcal (校正集相关系数): 0.986359773299993\n",
      "Rval (验证集相关系数): 0.9390609972938009\n",
      "RPD (相对预测偏差): 2.87964715479107\n",
      "Training time: 188.5791506767273 seconds\n",
      "Testing time: 0.6454081535339355 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# 读取红外谱图数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "\n",
    "# 使用PCA提取特征\n",
    "# 假设我们需要提取前10个主成分\n",
    "pca = PCA(n_components=10)\n",
    "pca.fit(ir_data)\n",
    "pca_features = pca.transform(ir_data)\n",
    "\n",
    "# 使用小波变换提取特征\n",
    "# 假设使用Daubechies 4小波基函数，并提取前3个近似系数\n",
    "wavelet = pywt.Wavelet('db4')\n",
    "coeffs = pywt.wavedec(np.array(ir_data), wavelet, level=3)\n",
    "wt_features = coeffs[0]\n",
    "\n",
    "# 将特征转换成DataFrame格式\n",
    "pca_features_df = pd.DataFrame(pca_features, columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9', 'PC10'])\n",
    "\n",
    "# 小波变换特征提取\n",
    "# coeffs[0]是第一层的近似系数，其形状是(244, 200)\n",
    "# 所以需要调整DataFrame的列数\n",
    "wt_features_df = pd.DataFrame(coeffs[0], columns=['WT' + str(i) for i in range(200)])\n",
    "\n",
    "# 读取'番泻苷含量'工作表\n",
    "targets = pd.read_excel(xls, '番泻苷含量')\n",
    "# 选取番泻苷A作为目标变量\n",
    "targets = targets[['番泻苷B']]\n",
    "# 查看目标变量的前几行\n",
    "print(targets.head())\n",
    "\n",
    "# 对特征集和目标变量集进行索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# 再次检查对齐后的样本数量\n",
    "print(\"特征集样本数量：\", pca_features_df.shape[0])\n",
    "print(\"目标变量集样本数量：\", targets.shape[0])\n",
    "\n",
    "# 分割数据集\n",
    "X_train, X_test, y_train, y_test = train_test_split(pca_features_df, targets, test_size=0.2, random_state=42)\n",
    "\n",
    "# 构建 ACLSTM 模型\n",
    "model_aclstm = Sequential()\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu'))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(LSTM(128, return_sequences=True))\n",
    "model_aclstm.add(LSTM(128))\n",
    "model_aclstm.add(Dense(1))\n",
    "model_aclstm.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "\n",
    "import time\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0)\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "# 测试所需时间\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7eca486a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 181\n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 8.82209880003032\n",
      "RMSEp (预测均方根误差): 22.70660004672586\n",
      "Rcal (校正集相关系数): 0.9913890088778213\n",
      "Rval (验证集相关系数): 0.9518523432397846\n",
      "RPD (相对预测偏差): 3.225282825110917\n",
      "Training time: 33.41567802429199 seconds\n",
      "Testing time: 0.06869745254516602 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 设置早停机制\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "cef00d72",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 160ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 12.582716136464096\n",
      "RMSEp (预测均方根误差): 27.88915678189944\n",
      "Rcal (校正集相关系数): 0.9820935786488909\n",
      "Rval (验证集相关系数): 0.9353014211725227\n",
      "RPD (相对预测偏差): 2.644219086648639\n",
      "Training time: 193.8965847492218 seconds\n",
      "Testing time: 0.6647062301635742 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# 读取红外谱图数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "\n",
    "# 使用PCA提取特征\n",
    "# 假设我们需要提取前10个主成分\n",
    "pca = PCA(n_components=10)\n",
    "pca.fit(ir_data)\n",
    "pca_features = pca.transform(ir_data)\n",
    "\n",
    "# 使用小波变换提取特征\n",
    "# 假设使用Daubechies 4小波基函数，并提取前3个近似系数\n",
    "wavelet = pywt.Wavelet('db4')\n",
    "coeffs = pywt.wavedec(np.array(ir_data), wavelet, level=3)\n",
    "wt_features = coeffs[0]\n",
    "\n",
    "# 将特征转换成DataFrame格式\n",
    "pca_features_df = pd.DataFrame(pca_features, columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9', 'PC10'])\n",
    "\n",
    "# 小波变换特征提取\n",
    "# coeffs[0]是第一层的近似系数，其形状是(244, 200)\n",
    "# 所以需要调整DataFrame的列数\n",
    "wt_features_df = pd.DataFrame(coeffs[0], columns=['WT' + str(i) for i in range(200)])\n",
    "\n",
    "# 读取'番泻苷含量'工作表\n",
    "targets = pd.read_excel(xls, '番泻苷含量')\n",
    "# 选取番泻苷A作为目标变量\n",
    "targets = targets[['番泻苷B']]\n",
    "\n",
    "# 对特征集和目标变量集进行索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# 定义算法\n",
    "def kennardstonealgorithm(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 调用Kennard-Stone算法  5：1\n",
    "a = kennardstonealgorithm(pca_features_df.values, 336) #5：1 336\n",
    "train_indices = a[0]\n",
    "test_indices = a[1]\n",
    "\n",
    "# 使用索引分割特征和目标变量\n",
    "X_train = pca_features_df.iloc[train_indices]\n",
    "X_test = pca_features_df.iloc[test_indices]\n",
    "y_train = targets.iloc[train_indices]\n",
    "y_test = targets.iloc[test_indices]\n",
    "\n",
    "# 确保 X_train 和 X_test 的形状与模型兼容\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "\n",
    "# 构建 ACLSTM 模型\n",
    "model_aclstm = Sequential()\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu'))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(LSTM(128, return_sequences=True))\n",
    "model_aclstm.add(LSTM(128))\n",
    "model_aclstm.add(Dense(1))\n",
    "model_aclstm.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0)\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "# 测试所需时间\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8dc461ef",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "ename": "SyntaxError",
     "evalue": "invalid syntax (3284072653.py, line 153)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;36m  Cell \u001b[1;32mIn[7], line 153\u001b[1;36m\u001b[0m\n\u001b[1;33m    print(f\"Testing time: {test_time} seconds\")import time\u001b[0m\n\u001b[1;37m                                               ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 设置早停机制\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "75dd72b6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 168ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 7.104324594060398\n",
      "RMSEp (预测均方根误差): 8.77399332060585\n",
      "Rcal (校正集相关系数): 0.9896609780720633\n",
      "Rval (验证集相关系数): 0.9793516888379581\n",
      "RPD (相对预测偏差): 4.803708874591093\n",
      "Training time: 202.7198462486267 seconds\n",
      "Testing time: 0.7025773525238037 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# 读取红外谱图数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "\n",
    "# 使用PCA提取特征\n",
    "# 假设我们需要提取前10个主成分\n",
    "pca = PCA(n_components=10)\n",
    "pca.fit(ir_data)\n",
    "pca_features = pca.transform(ir_data)\n",
    "\n",
    "# 使用小波变换提取特征\n",
    "# 假设使用Daubechies 4小波基函数，并提取前3个近似系数\n",
    "wavelet = pywt.Wavelet('db4')\n",
    "coeffs = pywt.wavedec(np.array(ir_data), wavelet, level=3)\n",
    "wt_features = coeffs[0]\n",
    "\n",
    "# 将特征转换成DataFrame格式\n",
    "pca_features_df = pd.DataFrame(pca_features, columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9', 'PC10'])\n",
    "\n",
    "# 小波变换特征提取\n",
    "# coeffs[0]是第一层的近似系数，其形状是(244, 200)\n",
    "# 所以需要调整DataFrame的列数\n",
    "wt_features_df = pd.DataFrame(coeffs[0], columns=['WT' + str(i) for i in range(200)])\n",
    "\n",
    "# 读取'番泻苷含量'工作表\n",
    "targets = pd.read_excel(xls, '番泻苷含量')\n",
    "# 选取番泻苷A作为目标变量\n",
    "targets = targets[['番泻苷A']]\n",
    "\n",
    "# 对特征集和目标变量集进行索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# 定义算法\n",
    "def kennardstonealgorithm(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 调用Kennard-Stone算法  5：1\n",
    "a = kennardstonealgorithm(pca_features_df.values, 336) #5：1 336\n",
    "train_indices = a[0]\n",
    "test_indices = a[1]\n",
    "\n",
    "# 使用索引分割特征和目标变量\n",
    "X_train = pca_features_df.iloc[train_indices]\n",
    "X_test = pca_features_df.iloc[test_indices]\n",
    "y_train = targets.iloc[train_indices]\n",
    "y_test = targets.iloc[test_indices]\n",
    "\n",
    "# 确保 X_train 和 X_test 的形状与模型兼容\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "\n",
    "# 构建 ACLSTM 模型\n",
    "model_aclstm = Sequential()\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(X_train.shape[1], 1)))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu'))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(alpha=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(LSTM(128, return_sequences=True))\n",
    "model_aclstm.add(LSTM(128))\n",
    "model_aclstm.add(Dense(1))\n",
    "model_aclstm.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0)\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "# 测试所需时间\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "1a7d3ac4",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数： 422\n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 165ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 23.99416436641113\n",
      "RMSEp (预测均方根误差): 47.09161224872966\n",
      "Rcal (校正集相关系数): 0.9332940936101647\n",
      "Rval (验证集相关系数): 0.7896492211629654\n",
      "RPD (相对预测偏差): 1.5659909939741803\n",
      "Training time: 92.71969270706177 seconds\n",
      "Testing time: 0.7859737873077393 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷B']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建模型\n",
    "def build_cnn_lstm_model(input_shape):\n",
    "    model = Sequential([\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=input_shape),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu'),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        LSTM(128, return_sequences=True),\n",
    "        LSTM(128),\n",
    "        Dense(1)\n",
    "    ])\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "model_cnn_lstm = build_cnn_lstm_model(X_train.shape[1:])\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_cnn_lstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数： {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_cnn_lstm = model_cnn_lstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_cnn_lstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_cnn_lstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "6fc16f83",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:1152: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel().\n",
      "  return fit_method(estimator, *args, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RMSE: 24.277649473670262\n",
      "R2: 0.7878204863586958\n",
      "Training time: 63.28445029258728 seconds\n",
      "Testing time: 0.01976752281188965 seconds\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'numpy.ndarray' object has no attribute 'columns'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[32], line 55\u001b[0m\n\u001b[0;32m     52\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTesting time: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtest_time\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m seconds\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m     54\u001b[0m \u001b[38;5;66;03m# 特征重要性排序\u001b[39;00m\n\u001b[1;32m---> 55\u001b[0m feature_importances \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mDataFrame(feature_importances\u001b[38;5;241m.\u001b[39mimportances_mean, index\u001b[38;5;241m=\u001b[39m\u001b[43mX_test\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcolumns\u001b[49m, columns\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mimportance\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m     56\u001b[0m feature_importances \u001b[38;5;241m=\u001b[39m feature_importances\u001b[38;5;241m.\u001b[39msort_values(by\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mimportance\u001b[39m\u001b[38;5;124m'\u001b[39m, ascending\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[0;32m     57\u001b[0m \u001b[38;5;28mprint\u001b[39m(feature_importances)\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'numpy.ndarray' object has no attribute 'columns'"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "import numpy as np\n",
    "import pywt\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "from sklearn.inspection import permutation_importance\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 数据预处理\n",
    "scaler = StandardScaler()\n",
    "ir_data_scaled = scaler.fit_transform(ir_data)\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = train_test_split(range(len(ir_data_scaled)), test_size=0.2, random_state=42)\n",
    "X_train, X_test = ir_data_scaled[train_indices], ir_data_scaled[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 构建模型\n",
    "model = RandomForestRegressor(n_estimators=100, random_state=42)\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "model.fit(X_train, y_train)\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred = model.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "rmse = np.sqrt(mean_squared_error(y_test, y_pred))\n",
    "r2 = r2_score(y_test, y_pred)\n",
    "\n",
    "# 特征重要性\n",
    "feature_importances = permutation_importance(model, X_test, y_test, n_repeats=30, random_state=42)\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSE: {rmse}\")\n",
    "print(f\"R2: {r2}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n",
    "\n",
    "# 特征重要性排序\n",
    "feature_importances = pd.DataFrame(feature_importances.importances_mean, index=X_test.columns, columns=['importance'])\n",
    "feature_importances = feature_importances.sort_values(by='importance', ascending=False)\n",
    "print(feature_importances)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "90a1fcf7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数： 338\n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 151ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 7.94639060190332\n",
      "RMSEp (预测均方根误差): 9.060535284021233\n",
      "Rcal (校正集相关系数): 0.9872867016550579\n",
      "Rval (验证集相关系数): 0.9786697136681966\n",
      "RPD (相对预测偏差): 4.777471907346083\n",
      "Training time: 70.37490773200989 seconds\n",
      "Testing time: 0.6495051383972168 seconds\n"
     ]
    }
   ],
   "source": [
    "# 导入必要的库\n",
    "import pywt\n",
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    # 小波变换\n",
    "    wavelet_features = pywt.wavedec(data, 'db2', level=1)\n",
    "    low_freq_features = wavelet_features[0]\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(low_freq_features)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建模型\n",
    "def build_aclstm_model(input_shape):\n",
    "    model = Sequential([\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=input_shape),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu'),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        LSTM(128, return_sequences=True),\n",
    "        LSTM(128),\n",
    "        Dense(1)\n",
    "    ])\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "model_aclstm = build_aclstm_model(X_train.shape[1:])\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数： {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_aclstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_aclstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "2cdd4f06",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数： 477\n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 161ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 6ms/step\n",
      "RMSEc (校正均方根误差): 10.958078487883279\n",
      "RMSEp (预测均方根误差): 12.895529315335414\n",
      "Rcal (校正集相关系数): 0.9864179853338597\n",
      "Rval (验证集相关系数): 0.9843833306493162\n",
      "RPD (相对预测偏差): 5.671257335707117\n",
      "Training time: 97.60751676559448 seconds\n",
      "Testing time: 0.7274272441864014 seconds\n"
     ]
    }
   ],
   "source": [
    "# 导入必要的库\n",
    "import pywt\n",
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷B']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    # ... (添加近红外光谱预处理代码)\n",
    "    # 小波变换\n",
    "    wavelet_features = pywt.wavedec(data, 'db2', level=1)\n",
    "    low_freq_features = wavelet_features[0]\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(low_freq_features)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "# 构建模型\n",
    "def build_aclstm_model(input_shape):\n",
    "    model = Sequential([\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=input_shape),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        Conv1D(filters=128, kernel_size=3, activation='relu'),\n",
    "        BatchNormalization(),\n",
    "        LeakyReLU(alpha=0.01),\n",
    "        AveragePooling1D(pool_size=2),\n",
    "        Dropout(0.2),\n",
    "        LSTM(128, return_sequences=True),\n",
    "        LSTM(128),\n",
    "        Dense(1)\n",
    "    ])\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "model_aclstm = build_aclstm_model(X_train.shape[1:])\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数： {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_aclstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_aclstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "17d82f1d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 235\n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 14ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 5.608493276452087\n",
      "RMSEp (预测均方根误差): 8.406360561306277\n",
      "Rcal (校正集相关系数): 0.9936003384913615\n",
      "Rval (验证集相关系数): 0.9801084447565422\n",
      "RPD (相对预测偏差): 5.013787984993105\n",
      "Training time: 46.6107611656189 seconds\n",
      "Testing time: 0.13644886016845703 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "# 开始训练前的时间\n",
    "start_train_time = time.time()\n",
    "\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# 设置早停机制\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "\n",
    "# 训练模型\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "# 结束训练后的时间\n",
    "end_train_time = time.time()\n",
    "\n",
    "# 训练所需时间\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 开始测试前的时间\n",
    "start_test_time = time.time()\n",
    "\n",
    "# 测试模型\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "\n",
    "# 结束测试后的时间\n",
    "end_test_time = time.time()\n",
    "\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算校正集和测试集的性能指标\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, model_aclstm.predict(X_train)))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))  # 使用 y_pred_aclstm 而不是 y_pred_test\n",
    "r_cal = pearsonr(y_train.values.ravel(), model_aclstm.predict(X_train).ravel())[0]  # 相关系数\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # 相关系数\n",
    "std_dev = np.std(y_test.values)  # 标准偏差\n",
    "RPD = std_dev / rmsep  # 相对预测偏差\n",
    "\n",
    "# 输出模型性能指标\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# 打印时间\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "59501258",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:99: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(\n",
      "C:\\Users\\30382\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\keras\\src\\layers\\activations\\leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated. Use `negative_slope` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 333ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 6.9784521583313035\n",
      "RMSEp (预测均方根误差): 12.750158407507909\n",
      "Rcal (校正集相关系数): 0.9900936719594181\n",
      "Rval (验证集相关系数): 0.9597749852142649\n",
      "RPD (相对预测偏差): 3.2988024851991096\n",
      "Training time: 209.33010911941528 seconds\n",
      "Testing time: 0.7087905406951904 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Input, Dense, Conv1D, LSTM, BatchNormalization, LeakyReLU, AveragePooling1D, Dropout\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "import pywt\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Load the infrared spectra worksheet (rows: samples, columns: wavenumbers).\n",
    "# NOTE(review): hardcoded absolute local path -- consider a configurable DATA_DIR.\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "\n",
    "# PCA feature extraction: keep the first 10 principal components.\n",
    "pca = PCA(n_components=10)\n",
    "pca_features = pca.fit_transform(ir_data)\n",
    "\n",
    "# Wavelet feature extraction: Daubechies-4 basis, 3-level decomposition.\n",
    "# coeffs[0] is the level-3 approximation coefficient array (the original\n",
    "# comment's \"first 3 approximation coefficients\" was inaccurate).\n",
    "wavelet = pywt.Wavelet('db4')\n",
    "coeffs = pywt.wavedec(np.array(ir_data), wavelet, level=3)\n",
    "wt_features = coeffs[0]\n",
    "\n",
    "# Wrap the extracted features as DataFrames.\n",
    "pca_features_df = pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "# coeffs[0] has 200 columns per sample, hence 200 wavelet feature columns.\n",
    "# NOTE(review): wt_features_df is built but not used below -- kept for\n",
    "# downstream experiments; confirm it is still needed.\n",
    "wt_features_df = pd.DataFrame(coeffs[0], columns=['WT' + str(i) for i in range(200)])\n",
    "\n",
    "# Load the target worksheet and keep sennoside A as the response variable.\n",
    "targets = pd.read_excel(xls, '番泻苷含量')\n",
    "targets = targets[['番泻苷A']]\n",
    "\n",
    "# Align the feature and target frames on their shared row index.\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone sample selection: greedily pick k samples that are maximally\n",
    "# spread in feature space (max-min squared-Euclidean-distance criterion).\n",
    "def kennardstonealgorithm(x_variables, k):\n",
    "    \"\"\"Return (selected_indices, remaining_indices) for a Kennard-Stone split.\n",
    "\n",
    "    x_variables : 2-D array-like, shape (n_samples, n_features).\n",
    "    k : int, number of samples to select (calibration-set size).\n",
    "    \"\"\"\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    # Seed with the sample farthest from the data mean (broadcasting replaces np.tile).\n",
    "    distance_to_average = ((x_variables - x_variables.mean(axis=0)) ** 2).sum(axis=1)\n",
    "    selected_sample_numbers = [int(np.argmax(distance_to_average))]\n",
    "    remaining_sample_numbers = np.arange(x_variables.shape[0])\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        # For each remaining candidate: distance to its nearest selected sample.\n",
    "        min_distance_to_selected_samples = [\n",
    "            np.min(((selected_samples - x_variables[candidate, :]) ** 2).sum(axis=1))\n",
    "            for candidate in range(x_variables.shape[0])\n",
    "        ]\n",
    "        # Select the candidate whose nearest selected sample is farthest away.\n",
    "        max_distance_sample_number = int(np.argmax(min_distance_to_selected_samples))\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# Kennard-Stone split: 352 calibration samples (352 -> ~7:1 split; 336 -> 9:1).\n",
    "train_indices, test_indices = kennardstonealgorithm(pca_features_df.values, 352)\n",
    "\n",
    "# Split features and targets by the selected indices.\n",
    "X_train = pca_features_df.iloc[train_indices]\n",
    "X_test = pca_features_df.iloc[test_indices]\n",
    "y_train = targets.iloc[train_indices]\n",
    "y_test = targets.iloc[test_indices]\n",
    "\n",
    "# Add a trailing channel axis so the data matches Conv1D's (timesteps, channels) input.\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "\n",
    "# Build the ACLSTM model: two Conv1D blocks followed by stacked LSTMs.\n",
    "# Fixes vs. the original run: an explicit Input layer replaces the deprecated\n",
    "# `input_shape` argument, and LeakyReLU uses `negative_slope` instead of the\n",
    "# deprecated `alpha` (both were flagged as UserWarnings in the recorded output).\n",
    "model_aclstm = Sequential()\n",
    "model_aclstm.add(Input(shape=(X_train.shape[1], 1)))\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu'))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(negative_slope=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(Conv1D(filters=128, kernel_size=3, activation='relu'))\n",
    "model_aclstm.add(BatchNormalization())\n",
    "model_aclstm.add(LeakyReLU(negative_slope=0.01))\n",
    "model_aclstm.add(AveragePooling1D(pool_size=2))\n",
    "model_aclstm.add(Dropout(0.2))\n",
    "model_aclstm.add(LSTM(128, return_sequences=True))\n",
    "model_aclstm.add(LSTM(128))\n",
    "model_aclstm.add(Dense(1))\n",
    "model_aclstm.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "\n",
    "# Train the model (timed tightly around fit).\n",
    "start_train_time = time.time()\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0)\n",
    "train_time = time.time() - start_train_time\n",
    "\n",
    "# Predict on the test set (timed).\n",
    "start_test_time = time.time()\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "test_time = time.time() - start_test_time\n",
    "\n",
    "# Calibration- and prediction-set metrics. The training-set prediction is\n",
    "# computed once here (the original ran model_aclstm.predict(X_train) twice).\n",
    "y_pred_train = model_aclstm.predict(X_train)\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, y_pred_train))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))\n",
    "r_cal = pearsonr(y_train.values.ravel(), y_pred_train.ravel())[0]  # calibration correlation\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # validation correlation\n",
    "std_dev = np.std(y_test.values)\n",
    "RPD = std_dev / rmsep  # residual predictive deviation: SD of reference / RMSEP\n",
    "\n",
    "# Report model performance.\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# Report timings.\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "a0122108",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 175\n",
      "\u001b[1m2/2\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 2ms/step \n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 3ms/step \n",
      "RMSEc (校正均方根误差): 5.342671786384574\n",
      "RMSEp (预测均方根误差): 7.395313031077606\n",
      "Rcal (校正集相关系数): 0.9941093991253611\n",
      "Rval (验证集相关系数): 0.9859631831640541\n",
      "RPD (相对预测偏差): 5.68742040595415\n",
      "Training time: 35.801713705062866 seconds\n",
      "Testing time: 0.06793689727783203 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "\n",
    "# NOTE(review): this cell continues training the `model_aclstm` instance left\n",
    "# over from the previous cell -- the weights are NOT reinitialized here, so the\n",
    "# metrics below reflect the cumulative training, not an independent run.\n",
    "\n",
    "# Early stopping on validation loss with a 100-epoch patience window.\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100)\n",
    "\n",
    "# Train the model (timed tightly around fit; the original timer also included\n",
    "# the import statement above).\n",
    "start_train_time = time.time()\n",
    "history = model_aclstm.fit(X_train, y_train, epochs=1000, batch_size=10, validation_split=0.2, verbose=0, callbacks=[early_stopping])\n",
    "train_time = time.time() - start_train_time\n",
    "\n",
    "# Best epoch = the epoch with the lowest validation loss. The original used\n",
    "# `early_stopping.stopped_epoch + 1`, which is the epoch training *halted* at\n",
    "# (patience epochs past the optimum) and evaluates to 1 when early stopping\n",
    "# never triggers (stopped_epoch stays 0).\n",
    "best_epoch = int(np.argmin(history.history['val_loss'])) + 1\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "# Predict on the test set (timed).\n",
    "start_test_time = time.time()\n",
    "y_pred_aclstm = model_aclstm.predict(X_test)\n",
    "test_time = time.time() - start_test_time\n",
    "\n",
    "# Calibration- and prediction-set metrics. The training-set prediction is\n",
    "# computed once here (the original ran model_aclstm.predict(X_train) twice).\n",
    "y_pred_train = model_aclstm.predict(X_train)\n",
    "rmsec = np.sqrt(mean_squared_error(y_train.values, y_pred_train))\n",
    "rmsep = np.sqrt(mean_squared_error(y_test.values, y_pred_aclstm))\n",
    "r_cal = pearsonr(y_train.values.ravel(), y_pred_train.ravel())[0]  # calibration correlation\n",
    "r_val = pearsonr(y_test.values.ravel(), y_pred_aclstm.ravel())[0]  # validation correlation\n",
    "std_dev = np.std(y_test.values)\n",
    "RPD = std_dev / rmsep  # residual predictive deviation: SD of reference / RMSEP\n",
    "\n",
    "# Report model performance.\n",
    "print(\"RMSEc (校正均方根误差):\", rmsec)\n",
    "print(\"RMSEp (预测均方根误差):\", rmsep)\n",
    "print(\"Rcal (校正集相关系数):\", r_cal)\n",
    "print(\"Rval (验证集相关系数):\", r_val)\n",
    "print(\"RPD (相对预测偏差):\", RPD)\n",
    "\n",
    "\n",
    "# Report timings.\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
