{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1207e237-fbad-4465-b3ac-422acbcc699f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "from hyperopt import fmin, tpe, hp, Trials, STATUS_OK\n",
    "\n",
    "# Load the IR spectra and the two sennoside target columns from Excel.\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A', '番泻苷B']]\n",
    "\n",
    "# Preprocessing: cast to float32 (PyTorch's default floating dtype).\n",
    "X = ir_data.values.astype(np.float32)\n",
    "y = targets.values.astype(np.float32)\n",
    "\n",
    "# Hold out 20% of the samples as a fixed, seeded test set.\n",
    "X_train, X_test, y_train, y_test = train_test_split(\n",
    "    X, y, test_size=0.2, random_state=42\n",
    ")\n",
    "\n",
    "# Convert to PyTorch tensors; unsqueeze adds the Conv1d channel axis.\n",
    "X_train = torch.Tensor(X_train).unsqueeze(1)  # [batch, 1, features]\n",
    "X_test = torch.Tensor(X_test).unsqueeze(1)\n",
    "y_train = torch.Tensor(y_train)\n",
    "y_test = torch.Tensor(y_test)\n",
    "\n",
    "# 定义CNN-GRU模型\n",
    "class CNNGRU(nn.Module):\n",
    "    def __init__(self, input_dim, output_dim, lr=0.001, batch_size=32, l2_lambda=0.001):\n",
    "        super().__init__()\n",
    "        # 卷积层\n",
    "        self.conv_block = nn.Sequential(\n",
    "            nn.Conv1d(1, 32, kernel_size=5, padding=2),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool1d(2),\n",
    "            nn.Conv1d(32, 64, kernel_size=5, padding=2),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool1d(2)\n",
    "        )\n",
    "        # GRU层\n",
    "        self.gru = nn.GRU(64, 128, num_layers=2, batch_first=True)\n",
    "        # 全连接层\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(128, 64),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(64, output_dim)\n",
    "        )\n",
    "        \n",
    "        # 超参数\n",
    "        self.lr = lr\n",
    "        self.batch_size = batch_size\n",
    "        self.l2_lambda = l2_lambda\n",
    "\n",
    "    def forward(self, x):\n",
    "        # 卷积部分\n",
    "        x = self.conv_block(x)        # 输出形状: [batch, 64, features//4]\n",
    "        # 调整维度输入GRU\n",
    "        x = x.permute(0, 2, 1)        # [batch, seq_len, features]\n",
    "        _, h_n = self.gru(x)          # h_n形状: [num_layers, batch, hidden_size]\n",
    "        # 取最后一层隐藏状态\n",
    "        x = h_n[-1]                   # [batch, hidden_size]\n",
    "        # 全连接层\n",
    "        return self.fc(x)\n",
    "\n",
    "# 修正后的MTMSE损失函数\n",
    "def MTMSE_loss(y_pred, y_true, weights):\n",
    "    mse1 = nn.MSELoss()(y_pred[:, 0], y_true[:, 0])\n",
    "    mse2 = nn.MSELoss()(y_pred[:, 1], y_true[:, 1])\n",
    "    return weights[0] * mse1 + weights[1] * mse2  # 分别应用权重\n",
    "\n",
    "# 贝叶斯优化参数空间\n",
    "space = {\n",
    "    'lr': hp.loguniform('lr', np.log(1e-4), np.log(0.01)),\n",
    "    'batch_size': hp.choice('batch_size', [16, 32, 64]),\n",
    "    'l2_lambda': hp.uniform('l2_lambda', 0.0, 0.01)\n",
    "}\n",
    "\n",
    "def objective(params):\n",
    "    # 设备配置\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    \n",
    "    # 模型初始化\n",
    "    model = CNNGRU(\n",
    "        input_dim=X_train.shape[2],  # 特征维度\n",
    "        output_dim=2,\n",
    "        lr=params['lr'],\n",
    "        batch_size=params['batch_size'],\n",
    "        l2_lambda=params['l2_lambda']\n",
    "    ).to(device)\n",
    "    \n",
    "    # 权重计算（转换为Python标量）\n",
    "    y1_mean = y_train[:, 0].mean().item()\n",
    "    y2_mean = y_train[:, 1].mean().item()\n",
    "    y1_std = y_train[:, 0].std().item()\n",
    "    y2_std = y_train[:, 1].std().item()\n",
    "    w1 = (np.log10(y2_mean)/np.log10(y1_mean)) * np.sqrt(y2_std/y1_std)\n",
    "    w1 = w1/(1 + w1)\n",
    "    weights = [w1, 1 - w1]\n",
    "    \n",
    "    # 优化器\n",
    "    optimizer = optim.Adam(\n",
    "        model.parameters(), \n",
    "        lr=params['lr'],\n",
    "        weight_decay=params['l2_lambda']\n",
    "    )\n",
    "    \n",
    "    # 数据加载器\n",
    "    train_loader = DataLoader(\n",
    "        TensorDataset(X_train.to(device), y_train.to(device)),\n",
    "        batch_size=params['batch_size'],\n",
    "        shuffle=True\n",
    "    )\n",
    "    \n",
    "    # 训练循环\n",
    "    model.train()\n",
    "    for epoch in range(500):  # 贝叶斯优化时减少epoch数\n",
    "        for X_batch, y_batch in train_loader:\n",
    "            optimizer.zero_grad()\n",
    "            y_pred = model(X_batch)\n",
    "            loss = MTMSE_loss(y_pred, y_batch, weights)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "    \n",
    "    # 验证损失\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        y_val_pred = model(X_test.to(device))\n",
    "        val_loss = MTMSE_loss(y_val_pred, y_test.to(device), weights)\n",
    "    \n",
    "    return {'loss': val_loss.item(), 'status': STATUS_OK}\n",
    "\n",
    "# 运行贝叶斯优化\n",
    "trials = Trials()\n",
    "best = fmin(\n",
    "    fn=objective,\n",
    "    space=space,\n",
    "    algo=tpe.suggest,\n",
    "    max_evals=30,\n",
    "    trials=trials\n",
    ")\n",
    "\n",
    "# 最终模型训练\n",
    "final_model = CNNGRU(\n",
    "    input_dim=X_train.shape[2],\n",
    "    output_dim=2,\n",
    "    lr=best['lr'],\n",
    "    batch_size=best['batch_size'],\n",
    "    l2_lambda=best['l2_lambda']\n",
    ").to(device)\n",
    "\n",
    "optimizer = optim.Adam(\n",
    "    final_model.parameters(), \n",
    "    lr=best['lr'],\n",
    "    weight_decay=best['l2_lambda']\n",
    ")\n",
    "\n",
    "train_loader = DataLoader(\n",
    "    TensorDataset(X_train.to(device), y_train.to(device)),\n",
    "    batch_size=best['batch_size'],\n",
    "    shuffle=True\n",
    ")\n",
    "\n",
    "# 早停机制\n",
    "best_val_loss = float('inf')\n",
    "early_stop_counter = 0\n",
    "\n",
    "for epoch in range(2000):\n",
    "    # 训练\n",
    "    final_model.train()\n",
    "    for X_batch, y_batch in train_loader:\n",
    "        optimizer.zero_grad()\n",
    "        y_pred = final_model(X_batch)\n",
    "        loss = MTMSE_loss(y_pred, y_batch, weights)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "    \n",
    "    # 验证\n",
    "    final_model.eval()\n",
    "    with torch.no_grad():\n",
    "        y_val_pred = final_model(X_test.to(device))\n",
    "        val_loss = MTMSE_loss(y_val_pred, y_test.to(device), weights)\n",
    "    \n",
    "    # 早停判断\n",
    "    if val_loss < best_val_loss:\n",
    "        best_val_loss = val_loss\n",
    "        early_stop_counter = 0\n",
    "    else:\n",
    "        early_stop_counter += 1\n",
    "        if early_stop_counter >= 100:\n",
    "            break\n",
    "\n",
    "# 评估函数\n",
    "def evaluate(model, X, y, device):\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        y_pred = model(X.to(device)).cpu().numpy()\n",
    "        y_true = y.numpy()\n",
    "        rmse = [\n",
    "            np.sqrt(mean_squared_error(y_true[:, i], y_pred[:, i]))\n",
    "            for i in range(y_true.shape[1])\n",
    "        ]\n",
    "        r = [\n",
    "            pearsonr(y_true[:, i], y_pred[:, i])[0]\n",
    "            for i in range(y_true.shape[1])\n",
    "        ]\n",
    "    return rmse, r\n",
    "\n",
    "train_rmse, train_r = evaluate(final_model, X_train, y_train, device)\n",
    "test_rmse, test_r = evaluate(final_model, X_test, y_test, device)\n",
    "\n",
    "print(f\"训练集结果:\")\n",
    "print(f\"  RMSE: A={train_rmse[0]:.4f}, B={train_rmse[1]:.4f}\")\n",
    "print(f\"  Pearson R: A={train_r[0]:.4f}, B={train_r[1]:.4f}\\n\")\n",
    "\n",
    "print(f\"测试集结果:\")\n",
    "print(f\"  RMSE: A={test_rmse[0]:.4f}, B={test_rmse[1]:.4f}\")\n",
    "print(f\"  Pearson R: A={test_r[0]:.4f}, B={test_r[1]:.4f}\")"
   ]
  },
  {
   "cell_type": "raw",
   "id": "c5d77d94-ca16-46f2-b108-baf1d98bcab9",
   "metadata": {},
   "source": [
    "alstm模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "617773dc-0742-4220-b41c-e3b1e46eb534",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:200: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(**kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\backend\\tensorflow\\core.py:216: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n",
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n",
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 549\n",
      "\u001b[1m1/3\u001b[0m \u001b[32m━━━━━━\u001b[0m\u001b[37m━━━━━━━━━━━━━━\u001b[0m \u001b[1m0s\u001b[0m 231ms/step"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (32, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 137ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 5ms/step \n",
      "RMSEc (校正均方根误差): 22.703880525952176\n",
      "RMSEp (预测均方根误差): 27.5564950770797\n",
      "Rcal (校正集相关系数): 0.8984885853063065\n",
      "Rval (验证集相关系数): 0.7950065432352565\n",
      "RPD (相对预测偏差): 1.5295018274967032\n",
      "Training time: 127.97389054298401 seconds\n",
      "Testing time: 0.5664646625518799 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D  # MaxPooling1D used by related CLSTM cell\n",
    "from tensorflow.keras.layers import LSTM, Flatten  # NOTE(review): LSTM re-imported here; redundant\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.layers import Layer\n",
    "\n",
    "# 自定义注意力机制层\n",
    "class TemporalAttention(Layer):\n",
    "    def __init__(self, units=32, **kwargs):\n",
    "        super(TemporalAttention, self).__init__(**kwargs)\n",
    "        self.W1 = Dense(units, activation='tanh')\n",
    "        self.W2 = Dense(1, activation='softmax')\n",
    "\n",
    "    def call(self, inputs):\n",
    "        # inputs形状: (batch_size, time_steps, features)\n",
    "        x = self.W1(inputs)  # 非线性变换\n",
    "        attention_scores = self.W2(x)  # 计算注意力权重\n",
    "        attention_scores = tf.transpose(attention_scores, perm=[0, 2, 1])\n",
    "        context_vector = tf.matmul(attention_scores, inputs)\n",
    "        return tf.squeeze(context_vector, axis=1)\n",
    "\n",
    "# 修改后的ALSTM模型\n",
    "def ALSTM_model(input_shape):\n",
    "    model = Sequential([\n",
    "        LSTM(64, return_sequences=True, input_shape=input_shape),  # 需要返回完整序列\n",
    "        TemporalAttention(units=32),  # 时间注意力机制\n",
    "        Dense(32, activation='relu'),\n",
    "        Dense(1)\n",
    "    ])\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "X_train = pca_features_df.iloc[train_indices].values.reshape(-1, 10, 1)\n",
    "X_test = pca_features_df.iloc[test_indices].values.reshape(-1, 10, 1)\n",
    "\n",
    "# 创建并训练模型\n",
    "model_alstm = ALSTM_model((10, 1))  # 输入形状：10个时间步，每个时间步1个特征\n",
    "\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0)\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "history = model_alstm.fit(\n",
    "    X_train, y_train,\n",
    "    epochs=1000,\n",
    "    batch_size=10,\n",
    "    validation_split=0.2,\n",
    "    verbose=0,\n",
    "    callbacks=[early_stopping]\n",
    ")\n",
    "\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_lstm = model_alstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_alstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_lstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "432a05df-56e1-4571-9798-228e2f6dec79",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\layers\\rnn\\rnn.py:200: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(**kwargs)\n",
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n",
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n",
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (None, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳epochs轮数: 611\n",
      "\u001b[1m1/3\u001b[0m \u001b[32m━━━━━━\u001b[0m\u001b[37m━━━━━━━━━━━━━━\u001b[0m \u001b[1m0s\u001b[0m 425ms/step"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\ops\\nn.py:907: UserWarning: You are using a softmax over axis -1 of a tensor of shape (32, 10, 1). This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 244ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 8ms/step\n",
      "RMSEc (校正均方根误差): 27.778650651930437\n",
      "RMSEp (预测均方根误差): 50.60917043176008\n",
      "Rcal (校正集相关系数): 0.9104160777332614\n",
      "Rval (验证集相关系数): 0.7501822628200333\n",
      "RPD (相对预测偏差): 1.4571477865394078\n",
      "Training time: 154.52311635017395 seconds\n",
      "Testing time: 1.0083062648773193 seconds\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D  # MaxPooling1D used by related CLSTM cell\n",
    "from tensorflow.keras.layers import LSTM, Flatten  # NOTE(review): LSTM re-imported here; redundant\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.layers import Layer\n",
    "\n",
    "# 自定义注意力机制层\n",
    "class TemporalAttention(Layer):\n",
    "    def __init__(self, units=32, **kwargs):\n",
    "        super(TemporalAttention, self).__init__(**kwargs)\n",
    "        self.W1 = Dense(units, activation='tanh')\n",
    "        self.W2 = Dense(1, activation='softmax')\n",
    "\n",
    "    def call(self, inputs):\n",
    "        # inputs形状: (batch_size, time_steps, features)\n",
    "        x = self.W1(inputs)  # 非线性变换\n",
    "        attention_scores = self.W2(x)  # 计算注意力权重\n",
    "        attention_scores = tf.transpose(attention_scores, perm=[0, 2, 1])\n",
    "        context_vector = tf.matmul(attention_scores, inputs)\n",
    "        return tf.squeeze(context_vector, axis=1)\n",
    "\n",
    "# 修改后的ALSTM模型\n",
    "def ALSTM_model(input_shape):\n",
    "    model = Sequential([\n",
    "        LSTM(64, return_sequences=True, input_shape=input_shape),  # 需要返回完整序列\n",
    "        TemporalAttention(units=32),  # 时间注意力机制\n",
    "        Dense(32, activation='relu'),\n",
    "        Dense(1)\n",
    "    ])\n",
    "    model.compile(optimizer=Adam(learning_rate=1e-3), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "# 读取数据\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷B']]\n",
    "\n",
    "# 特征提取\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# 索引对齐\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "\n",
    "\n",
    "# 划分数据集\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# 调整数据形状\n",
    "X_train = np.expand_dims(X_train, axis=2)\n",
    "X_test = np.expand_dims(X_test, axis=2)\n",
    "\n",
    "X_train = pca_features_df.iloc[train_indices].values.reshape(-1, 10, 1)\n",
    "X_test = pca_features_df.iloc[test_indices].values.reshape(-1, 10, 1)\n",
    "\n",
    "# 创建并训练模型\n",
    "model_alstm = ALSTM_model((10, 1))  # 输入形状：10个时间步，每个时间步1个特征\n",
    "\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0)\n",
    "\n",
    "# 训练模型\n",
    "start_train_time = time.time()\n",
    "history = model_alstm.fit(\n",
    "    X_train, y_train,\n",
    "    epochs=1000,\n",
    "    batch_size=10,\n",
    "    validation_split=0.2,\n",
    "    verbose=0,\n",
    "    callbacks=[early_stopping]\n",
    ")\n",
    "\n",
    "\n",
    "# 获取最佳epochs轮数\n",
    "best_epoch = early_stopping.stopped_epoch + 1  # +1 因为stopped_epoch是从0开始的\n",
    "print(f\"最佳epochs轮数: {best_epoch}\")\n",
    "\n",
    "end_train_time = time.time()\n",
    "train_time = end_train_time - start_train_time\n",
    "\n",
    "# 测试模型\n",
    "start_test_time = time.time()\n",
    "y_pred_lstm = model_alstm.predict(X_test)\n",
    "end_test_time = time.time()\n",
    "test_time = end_test_time - start_test_time\n",
    "\n",
    "# 计算性能指标\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    rmse = np.sqrt(mean_squared_error(y_true, y_pred))\n",
    "    r = pearsonr(y_true.ravel(), y_pred.ravel())[0]\n",
    "    return rmse, r\n",
    "\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, model_alstm.predict(X_train))\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_pred_lstm)\n",
    "RPD = np.std(y_test.values) / rmsep\n",
    "\n",
    "# 输出性能指标\n",
    "print(f\"RMSEc (校正均方根误差): {rmsec}\\nRMSEp (预测均方根误差): {rmsep}\\nRcal (校正集相关系数): {r_cal}\\nRval (验证集相关系数): {r_val}\\nRPD (相对预测偏差): {RPD}\")\n",
    "print(f\"Training time: {train_time} seconds\")\n",
    "print(f\"Testing time: {test_time} seconds\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4892e5b0-d33a-464b-83d0-96cf4727d459",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "dcabe0a4-6dc7-46a3-99f0-53cd39de1df1",
   "metadata": {},
   "source": [
    "clstm模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "48ac34e0-97c6-4410-a548-762ec94f3c2a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 212ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 11ms/step\n",
      "\n",
      "RMSEc: 10.3960\n",
      "RMSEp: 9.9081\n",
      "R_cal: 0.9786\n",
      "R_val: 0.9726\n",
      "RPD: 4.25\n",
      "训练时间: 105.42秒\n",
      "预测时间: 0.8766秒\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D\n",
    "\n",
    "# Load IR spectra and the sennoside-A target column.\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷A']]\n",
    "\n",
    "# Feature extraction: project the spectra onto the first 10 PCs.\n",
    "def extract_features(data):\n",
    "    pca = PCA(n_components=10)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    return pd.DataFrame(pca_features, columns=['PC' + str(i) for i in range(1, 11)])\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# Align feature and target rows on their index (drops unmatched rows).\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "\n",
    "# Kennard-Stone算法实现省略，假设函数名为kennard_stone_selection\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    x_variables = np.array(x_variables)\n",
    "    original_x = x_variables\n",
    "    distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(\n",
    "        axis=1)\n",
    "    max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))\n",
    "    max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "    selected_sample_numbers = list()\n",
    "    selected_sample_numbers.append(max_distance_sample_number)\n",
    "    remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)\n",
    "    x_variables = np.delete(x_variables, selected_sample_numbers, 0)\n",
    "    remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)\n",
    "    for iteration in range(1, k):\n",
    "        selected_samples = original_x[selected_sample_numbers, :]\n",
    "        min_distance_to_selected_samples = list()\n",
    "        for min_distance_calculation_number in range(0, x_variables.shape[0]):\n",
    "            distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],\n",
    "                                                                        (selected_samples.shape[0], 1))) ** 2).sum(\n",
    "                axis=1)\n",
    "            min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))\n",
    "        max_distance_sample_number = np.where(\n",
    "            min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))\n",
    "        max_distance_sample_number = max_distance_sample_number[0][0]\n",
    "        selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])\n",
    "        x_variables = np.delete(x_variables, max_distance_sample_number, 0)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "    \n",
    "# Split the data set: 336 calibration samples chosen by Kennard-Stone\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# Reshape to (samples, timesteps, features) for the Conv1D/LSTM stack\n",
    "X_train = X_train.values.reshape(-1, 10, 1)  # 10 timesteps, 1 feature per step\n",
    "X_test = X_test.values.reshape(-1, 10, 1)\n",
    "\n",
    "# Build the CLSTM (Conv1D + LSTM) regression model\n",
    "def CLSTM_model(input_shape, output_shape, learning_rate=0.001):\n",
    "    \"\"\"Stacked Conv1D/LSTM regressor compiled with Adam and MSE loss.\n",
    "\n",
    "    input_shape  : (timesteps, features) of one sample\n",
    "    output_shape : number of regression outputs\n",
    "    learning_rate: Adam step size (default preserves previous behaviour)\n",
    "    \"\"\"\n",
    "    # An explicit Input layer avoids the Keras UserWarning about passing\n",
    "    # input_shape to the first Conv1D layer.\n",
    "    from tensorflow.keras.layers import Input\n",
    "\n",
    "    model = Sequential()\n",
    "    model.add(Input(shape=input_shape))\n",
    "    # first conv/LSTM stage\n",
    "    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\n",
    "    model.add(MaxPooling1D(pool_size=2))\n",
    "    model.add(LSTM(64, return_sequences=True))\n",
    "\n",
    "    # second conv/LSTM stage\n",
    "    model.add(Conv1D(filters=32, kernel_size=2, activation='relu'))\n",
    "    model.add(LSTM(32))\n",
    "\n",
    "    # regression head\n",
    "    model.add(Dense(16, activation='relu'))\n",
    "    model.add(Dense(output_shape))\n",
    "\n",
    "    model.compile(optimizer=Adam(learning_rate=learning_rate), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "# Create the model\n",
    "model_clstm = CLSTM_model((10, 1), 1)  # input: 10 timesteps, 1 feature each\n",
    "\n",
    "# Training setup: stop when val_loss stalls for 100 epochs, restore best weights\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100, restore_best_weights=True)\n",
    "\n",
    "# Train the model (wall-clock timed)\n",
    "start_train = time.time()\n",
    "history = model_clstm.fit(\n",
    "    X_train, y_train,\n",
    "    epochs=1000,\n",
    "    batch_size=10,\n",
    "    validation_split=0.2,\n",
    "    callbacks=[early_stopping],\n",
    "    verbose=0\n",
    ")\n",
    "train_time = time.time() - start_train\n",
    "\n",
    "# Predict on the test set (wall-clock timed)\n",
    "start_test = time.time()\n",
    "y_pred = model_clstm.predict(X_test)\n",
    "test_time = time.time() - start_test\n",
    "\n",
    "\n",
    "# Performance metrics\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    \"\"\"Return (RMSE, Pearson r) for two same-length series of values.\"\"\"\n",
    "    true_flat = np.array(y_true).ravel()\n",
    "    pred_flat = np.array(y_pred).ravel()\n",
    "\n",
    "    root_mse = np.sqrt(mean_squared_error(true_flat, pred_flat))\n",
    "    corr, _p_value = pearsonr(true_flat, pred_flat)  # discard the p-value\n",
    "    return root_mse, corr\n",
    "\n",
    "# Flatten predictions to 1-D; reuse y_pred from the timed run above instead\n",
    "# of a redundant third predict() pass (same weights, same deterministic output)\n",
    "y_train_pred = model_clstm.predict(X_train).reshape(-1)\n",
    "y_test_pred = y_pred.reshape(-1)\n",
    "\n",
    "# Compute metrics on calibration (train) and prediction (test) sets\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, y_train_pred)\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_test_pred)\n",
    "# RPD = SD(test reference values) / RMSEP; NOTE(review): np.std defaults to\n",
    "# ddof=0 while RPD is often defined with the sample SD (ddof=1) - confirm intent\n",
    "rpd = np.std(y_test.values) / rmsep\n",
    "\n",
    "# Report results\n",
    "print(f\"\\nRMSEc: {rmsec:.4f}\\nRMSEp: {rmsep:.4f}\")\n",
    "print(f\"R_cal: {r_cal:.4f}\\nR_val: {r_val:.4f}\")\n",
    "print(f\"RPD: {rpd:.2f}\")\n",
    "print(f\"训练时间: {train_time:.2f}秒\")\n",
    "print(f\"预测时间: {test_time:.4f}秒\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "cced422c-4bf5-4e70-9b90-b4045b77f668",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\Lib\\site-packages\\keras\\src\\layers\\convolutional\\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.\n",
      "  super().__init__(activity_regularizer=activity_regularizer, **kwargs)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 208ms/step\n",
      "\u001b[1m11/11\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 4ms/step \n",
      "\u001b[1m3/3\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 11ms/step\n",
      "\n",
      "RMSEc: 5.0154\n",
      "RMSEp: 15.2607\n",
      "R_cal: 0.9972\n",
      "R_val: 0.9787\n",
      "RPD: 4.8323\n",
      "训练时间: 254.00秒\n",
      "预测时间: 0.9175秒\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from scipy.stats import pearsonr\n",
    "import numpy as np\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.layers import Dense, Conv1D, LSTM, MaxPooling1D\n",
    "\n",
    "# Load data: IR spectra and sennoside content from the Excel workbook\n",
    "file_path = 'F:\\\\研究\\\\番泻苷在线提取数据.xlsx'\n",
    "with pd.ExcelFile(file_path) as xls:\n",
    "    # doubled backslash keeps the same literal value while avoiding the\n",
    "    # invalid-escape SyntaxWarning Python 3.12 raises for the '\\波' sequence\n",
    "    ir_data = pd.read_excel(xls, '红外谱图', index_col='编号\\\\波数')\n",
    "    targets = pd.read_excel(xls, '番泻苷含量')[['番泻苷B']]\n",
    "\n",
    "# Feature extraction\n",
    "def extract_features(data, n_components=10):\n",
    "    \"\"\"Reduce the spectra to `n_components` principal components.\n",
    "\n",
    "    Returns a DataFrame with columns PC1..PCn and a fresh RangeIndex\n",
    "    (the later positional align with `targets` relies on this index).\n",
    "    \"\"\"\n",
    "    pca = PCA(n_components=n_components)\n",
    "    pca_features = pca.fit_transform(data)\n",
    "    cols = ['PC' + str(i) for i in range(1, n_components + 1)]\n",
    "    return pd.DataFrame(pca_features, columns=cols)\n",
    "\n",
    "pca_features_df = extract_features(ir_data)\n",
    "\n",
    "# Align on the (positional) row index so features and targets cover the same samples\n",
    "pca_features_df, targets = pca_features_df.align(targets, join='inner', axis=0)\n",
    "\n",
    "# Kennard-Stone algorithm for calibration/validation sample splitting\n",
    "def kennard_stone_selection(x_variables, k):\n",
    "    \"\"\"Kennard-Stone sample selection.\n",
    "\n",
    "    Picks k samples that uniformly span the feature space: start with the\n",
    "    sample farthest from the mean, then repeatedly add the remaining sample\n",
    "    whose minimum squared Euclidean distance to the selected set is largest.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    x_variables : array-like, shape (n_samples, n_features)\n",
    "    k : int, number of samples to select (1 <= k <= n_samples)\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (selected, remaining) : (list of int, np.ndarray of int)\n",
    "        Row indices of the selected and the remaining samples.\n",
    "\n",
    "    Ties break on the first maximum (same as the previous\n",
    "    np.where(== max)[0][0]); caching each remaining sample's minimum\n",
    "    distance to the selected set makes the loop O(n*k) instead of O(n*k^2).\n",
    "    \"\"\"\n",
    "    x = np.asarray(x_variables, dtype=float)\n",
    "    # seed: the sample farthest from the global mean\n",
    "    first = int(np.argmax(((x - x.mean(axis=0)) ** 2).sum(axis=1)))\n",
    "    selected_sample_numbers = [first]\n",
    "    remaining_sample_numbers = np.delete(np.arange(x.shape[0]), first)\n",
    "    # min squared distance of every remaining sample to the selected set\n",
    "    min_dist = ((x[remaining_sample_numbers] - x[first]) ** 2).sum(axis=1)\n",
    "    for _ in range(1, k):\n",
    "        pick = int(np.argmax(min_dist))\n",
    "        new_index = remaining_sample_numbers[pick]\n",
    "        selected_sample_numbers.append(new_index)\n",
    "        remaining_sample_numbers = np.delete(remaining_sample_numbers, pick)\n",
    "        min_dist = np.delete(min_dist, pick)\n",
    "        if remaining_sample_numbers.size:\n",
    "            # only the newly selected sample can lower the cached minima\n",
    "            d_new = ((x[remaining_sample_numbers] - x[new_index]) ** 2).sum(axis=1)\n",
    "            min_dist = np.minimum(min_dist, d_new)\n",
    "\n",
    "    return selected_sample_numbers, remaining_sample_numbers\n",
    "    \n",
    "# Split the data set: 336 calibration samples chosen by Kennard-Stone\n",
    "train_indices, test_indices = kennard_stone_selection(pca_features_df.values, 336)\n",
    "X_train, X_test = pca_features_df.iloc[train_indices], pca_features_df.iloc[test_indices]\n",
    "y_train, y_test = targets.iloc[train_indices], targets.iloc[test_indices]\n",
    "\n",
    "# Reshape to (samples, timesteps, features) for the Conv1D/LSTM stack\n",
    "X_train = X_train.values.reshape(-1, 10, 1)  # 10 timesteps, 1 feature per step\n",
    "X_test = X_test.values.reshape(-1, 10, 1)\n",
    "\n",
    "# Build the CLSTM (Conv1D + LSTM) regression model\n",
    "def CLSTM_model(input_shape, output_shape, learning_rate=0.001):\n",
    "    \"\"\"Stacked Conv1D/LSTM regressor compiled with Adam and MSE loss.\n",
    "\n",
    "    input_shape  : (timesteps, features) of one sample\n",
    "    output_shape : number of regression outputs\n",
    "    learning_rate: Adam step size (default preserves previous behaviour)\n",
    "    \"\"\"\n",
    "    # An explicit Input layer avoids the Keras UserWarning about passing\n",
    "    # input_shape to the first Conv1D layer (see this cell's stderr).\n",
    "    from tensorflow.keras.layers import Input\n",
    "\n",
    "    model = Sequential()\n",
    "    model.add(Input(shape=input_shape))\n",
    "    # first conv/LSTM stage\n",
    "    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\n",
    "    model.add(MaxPooling1D(pool_size=2))\n",
    "    model.add(LSTM(64, return_sequences=True))\n",
    "\n",
    "    # second conv/LSTM stage\n",
    "    model.add(Conv1D(filters=32, kernel_size=2, activation='relu'))\n",
    "    model.add(LSTM(32))\n",
    "\n",
    "    # regression head\n",
    "    model.add(Dense(16, activation='relu'))\n",
    "    model.add(Dense(output_shape))\n",
    "\n",
    "    model.compile(optimizer=Adam(learning_rate=learning_rate), loss='mean_squared_error')\n",
    "    return model\n",
    "\n",
    "# Create the model\n",
    "model_clstm = CLSTM_model((10, 1), 1)  # input: 10 timesteps, 1 feature each\n",
    "\n",
    "# Training setup: stop when val_loss stalls for 100 epochs, restore best weights\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=100, restore_best_weights=True)\n",
    "\n",
    "# Train the model (wall-clock timed)\n",
    "start_train = time.time()\n",
    "history = model_clstm.fit(\n",
    "    X_train, y_train,\n",
    "    epochs=1000,\n",
    "    batch_size=10,\n",
    "    validation_split=0.2,\n",
    "    callbacks=[early_stopping],\n",
    "    verbose=0\n",
    ")\n",
    "train_time = time.time() - start_train\n",
    "\n",
    "# Predict on the test set (wall-clock timed)\n",
    "start_test = time.time()\n",
    "y_pred = model_clstm.predict(X_test)\n",
    "test_time = time.time() - start_test\n",
    "\n",
    "\n",
    "# Performance metrics\n",
    "def calculate_metrics(y_true, y_pred):\n",
    "    \"\"\"Return (RMSE, Pearson r) for two same-length series of values.\"\"\"\n",
    "    true_flat = np.array(y_true).ravel()\n",
    "    pred_flat = np.array(y_pred).ravel()\n",
    "\n",
    "    root_mse = np.sqrt(mean_squared_error(true_flat, pred_flat))\n",
    "    corr, _p_value = pearsonr(true_flat, pred_flat)  # discard the p-value\n",
    "    return root_mse, corr\n",
    "\n",
    "# Flatten predictions to 1-D; reuse y_pred from the timed run above instead\n",
    "# of a redundant third predict() pass (same weights, same deterministic output)\n",
    "y_train_pred = model_clstm.predict(X_train).reshape(-1)\n",
    "y_test_pred = y_pred.reshape(-1)\n",
    "\n",
    "# Compute metrics on calibration (train) and prediction (test) sets\n",
    "rmsec, r_cal = calculate_metrics(y_train.values, y_train_pred)\n",
    "rmsep, r_val = calculate_metrics(y_test.values, y_test_pred)\n",
    "# RPD = SD(test reference values) / RMSEP; NOTE(review): np.std defaults to\n",
    "# ddof=0 while RPD is often defined with the sample SD (ddof=1) - confirm intent\n",
    "rpd = np.std(y_test.values) / rmsep\n",
    "\n",
    "# Report results\n",
    "print(f\"\\nRMSEc: {rmsec:.4f}\\nRMSEp: {rmsep:.4f}\")\n",
    "print(f\"R_cal: {r_cal:.4f}\\nR_val: {r_val:.4f}\")\n",
    "print(f\"RPD: {rpd:.4f}\")\n",
    "print(f\"训练时间: {train_time:.2f}秒\")\n",
    "print(f\"预测时间: {test_time:.4f}秒\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
