{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "bbc106ad",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "工作表名称: ['红外谱图', '蒽醌含量']\n",
      "工作表 '红外谱图' 的内容:\n",
      "  编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066   4049.78  \\\n",
      "0   1-1  1.264708  1.288542  1.333471  1.370217  1.352400  1.333578  1.371618   \n",
      "1   1-2  1.260399  1.293289  1.360664  1.370719  1.353501  1.321410  1.370489   \n",
      "2   1-3  1.218367  1.271432  1.299451  1.316458  1.310749  1.288309  1.347342   \n",
      "3   1-4  1.259917  1.279549  1.318026  1.315663  1.315034  1.294442  1.341085   \n",
      "4   1-5  1.227900  1.268811  1.324630  1.314813  1.320434  1.292731  1.337843   \n",
      "\n",
      "   4057.494  4065.208  ...  9935.461  9943.175  9950.889  9958.603  9966.316  \\\n",
      "0  1.414510  1.403367  ... -0.044855 -0.044209 -0.044844 -0.044485 -0.045000   \n",
      "1  1.404793  1.402450  ... -0.052916 -0.052215 -0.052981 -0.052385 -0.053302   \n",
      "2  1.396320  1.355759  ... -0.075619 -0.075195 -0.075577 -0.075273 -0.076240   \n",
      "3  1.376105  1.367473  ... -0.072897 -0.072135 -0.072650 -0.072386 -0.073184   \n",
      "4  1.386666  1.369949  ... -0.071002 -0.070456 -0.070898 -0.070672 -0.071474   \n",
      "\n",
      "    9974.03  9981.744  9989.458  9997.172  Unnamed: 779  \n",
      "0 -0.044582 -0.044907 -0.045498 -0.045294           NaN  \n",
      "1 -0.052861 -0.052986 -0.053608 -0.053561           NaN  \n",
      "2 -0.075761 -0.075899 -0.076142 -0.076000           NaN  \n",
      "3 -0.072785 -0.072895 -0.073353 -0.073166           NaN  \n",
      "4 -0.071201 -0.071496 -0.071783 -0.071296           NaN  \n",
      "\n",
      "[5 rows x 780 columns]\n",
      "工作表 '蒽醌含量' 的内容:\n",
      "  编号\\化合物       总蒽醌     芦荟大黄素       大黄酸       大黄素       大黄酚     大黄素甲醚\n",
      "0    1-1  6.385174  0.638627  2.028011  1.058405  1.753474  0.906657\n",
      "1    1-2  6.435919  0.675784  2.111518  1.098224  1.645855  0.904537\n",
      "2    1-3  6.623122  0.721373  2.340516  1.175284  1.655747  0.730202\n",
      "3    1-4  7.767245  0.841471  2.890464  1.368364  1.895988  0.770959\n",
      "4    1-5  9.247683  0.986471  3.523102  1.444994  2.186328  1.106788\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'  # 请确保文件路径正确\n",
    "\n",
    "# 使用ExcelFile类来读取Excel文件\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 获取所有工作表的名称\n",
    "sheet_names = xls.sheet_names\n",
    "print(\"工作表名称:\", sheet_names)\n",
    "\n",
    "# 遍历所有工作表并打印其内容\n",
    "for sheet_name in sheet_names:\n",
    "    # 读取每个工作表\n",
    "    sheet_data = pd.read_excel(xls, sheet_name)\n",
    "    print(f\"工作表 '{sheet_name}' 的内容:\")\n",
    "    print(sheet_data.head())  # 打印每个工作表的前几行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "ed03d8c6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "异常值样本编号： Index([ 49,  51,  57,  50,  60,  53,  45,  46,  59,  44,  56,  47,  58, 186,\n",
      "       193,  43, 189,  54, 192, 190],\n",
      "      dtype='int64')\n",
      "清理后的数据集样本数量： 383\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.impute import SimpleImputer\n",
    "import numpy as np\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 读取红外谱图数据\n",
    "nir_data = pd.read_excel(xls, '红外谱图')\n",
    "nir_data = nir_data.drop('编号\\波数', axis=1)  # 移除编号列\n",
    "\n",
    "# 读取蒽醌含量数据\n",
    "anthraquinone_data = pd.read_excel(xls, '蒽醌含量')\n",
    "anthraquinone_data = anthraquinone_data.drop('编号\\化合物', axis=1)  # 移除编号列\n",
    "\n",
    "# 合并数据集\n",
    "combined_data = pd.concat([nir_data, anthraquinone_data], axis=1)\n",
    "combined_data.columns = combined_data.columns.astype(str)  # 将所有列名转换为字符串\n",
    "\n",
    "# 使用平均值填充NaN值\n",
    "imputer = SimpleImputer(strategy='mean')\n",
    "combined_data_imputed = imputer.fit_transform(combined_data)\n",
    "\n",
    "# 使用PCA进行降维，提取主成分\n",
    "pca = PCA(n_components=10)  # 假设提取10个主成分\n",
    "combined_data_pca = pca.fit_transform(combined_data_imputed)\n",
    "\n",
    "# 计算每个样本的马氏距离\n",
    "mean = np.mean(combined_data_pca, axis=0)\n",
    "cov_matrix = np.cov(combined_data_pca, rowvar=False)\n",
    "inv_cov_matrix = np.linalg.inv(cov_matrix)\n",
    "mahalanobis_distances = np.apply_along_axis(lambda x: np.sqrt(np.dot(np.dot((x - mean), inv_cov_matrix), (x - mean).T)), axis=1, arr=combined_data_pca)\n",
    "\n",
    "# 根据马氏距离排序样本\n",
    "sorted_indices = np.argsort(mahalanobis_distances)\n",
    "\n",
    "# 选择前5%的样本作为异常值\n",
    "num_samples = combined_data_pca.shape[0]\n",
    "num_outliers = int(num_samples * 0.05)\n",
    "outlier_indices = sorted_indices[:num_outliers]\n",
    "\n",
    "# 输出异常值样本编号\n",
    "outliers = combined_data.index[outlier_indices]\n",
    "print(\"异常值样本编号：\", outliers)\n",
    "\n",
    "# 从原始数据中移除异常值\n",
    "combined_data_clean = combined_data.drop(index=outliers)\n",
    "\n",
    "# 检查清理后的数据集样本数量\n",
    "print(\"清理后的数据集样本数量：\", combined_data_clean.shape[0])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "af5599b8",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "   4003.497  4011.211  4018.925  4026.638  4034.352  4042.066   4049.78  \\\n",
      "0  1.264708  1.288542  1.333471  1.370217  1.352400  1.333578  1.371618   \n",
      "1  1.260399  1.293289  1.360664  1.370719  1.353501  1.321410  1.370489   \n",
      "2  1.218367  1.271432  1.299451  1.316458  1.310749  1.288309  1.347342   \n",
      "3  1.259917  1.279549  1.318026  1.315663  1.315034  1.294442  1.341085   \n",
      "4  1.227900  1.268811  1.324630  1.314813  1.320434  1.292731  1.337843   \n",
      "\n",
      "   4057.494  4065.208  4072.922  ...  9981.744  9989.458  9997.172  \\\n",
      "0  1.414510  1.403367  1.323938  ... -0.044907 -0.045498 -0.045294   \n",
      "1  1.404793  1.402450  1.327771  ... -0.052986 -0.053608 -0.053561   \n",
      "2  1.396320  1.355759  1.278996  ... -0.075899 -0.076142 -0.076000   \n",
      "3  1.376105  1.367473  1.289868  ... -0.072895 -0.073353 -0.073166   \n",
      "4  1.386666  1.369949  1.289854  ... -0.071496 -0.071783 -0.071296   \n",
      "\n",
      "   Unnamed: 779       总蒽醌     芦荟大黄素       大黄酸       大黄素       大黄酚     大黄素甲醚  \n",
      "0           NaN  6.385174  0.638627  2.028011  1.058405  1.753474  0.906657  \n",
      "1           NaN  6.435919  0.675784  2.111518  1.098224  1.645855  0.904537  \n",
      "2           NaN  6.623122  0.721373  2.340516  1.175284  1.655747  0.730202  \n",
      "3           NaN  7.767245  0.841471  2.890464  1.368364  1.895988  0.770959  \n",
      "4           NaN  9.247683  0.986471  3.523102  1.444994  2.186328  1.106788  \n",
      "\n",
      "[5 rows x 785 columns]\n"
     ]
    }
   ],
   "source": [
    "# Preview the first rows of the cleaned joint matrix.\n",
    "print(combined_data_clean.head())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "5bb2798b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Missing values after imputation:\n",
      "0\n"
     ]
    }
   ],
   "source": [
    "# 检查X_clean_imputed中是否仍然存在NaN值\n",
    "missing_values = np.isnan(X_clean_imputed).sum()\n",
    "print(\"Missing values after imputation:\")\n",
    "print(missing_values)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "f1d79f2a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "降维后的特征数量： 5\n",
      "解释的方差比例： [9.38098566e-01 5.39472072e-02 3.96407191e-03 3.43478353e-03\n",
      " 4.57717901e-04]\n"
     ]
    }
   ],
   "source": [
    "from sklearn.decomposition import PCA\n",
    "\n",
    "# NOTE(review): X_clean_imputed is not defined by any earlier cell in this\n",
    "# notebook — it presumably came from a since-deleted cell. Confirm it is\n",
    "# built (e.g. by mean-imputing combined_data_clean) before a fresh run.\n",
    "\n",
    "# PCA model keeping the first 5 principal components.\n",
    "pca = PCA(n_components=5)\n",
    "\n",
    "# Project the imputed, cleaned data onto those components.\n",
    "X_pca = pca.fit_transform(X_clean_imputed)\n",
    "\n",
    "# Report the kept component count and their explained-variance ratios.\n",
    "print(\"降维后的特征数量：\", pca.n_components_)\n",
    "print(\"解释的方差比例：\", pca.explained_variance_ratio_)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "c88f98ce",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳预处理方法： ('none', 0, 0)\n",
      "最佳R2： 0.9999920409308568\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.impute import SimpleImputer\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.cross_decomposition import PLSRegression\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "# 读取红外谱图数据\n",
    "nir_data = pd.read_excel(xls, '红外谱图')\n",
    "nir_data = nir_data.drop('编号\\波数', axis=1)  # 移除编号列\n",
    "# 读取蒽醌含量数据\n",
    "anthraquinone_data = pd.read_excel(xls, '蒽醌含量')\n",
    "anthraquinone_data = anthraquinone_data.drop('编号\\化合物', axis=1)  # 移除编号列\n",
    "# 合并数据集\n",
    "combined_data = pd.concat([nir_data, anthraquinone_data], axis=1)\n",
    "combined_data.columns = combined_data.columns.astype(str)  # 将所有列名转换为字符串\n",
    "\n",
    "# 使用平均值填充NaN值\n",
    "imputer = SimpleImputer(strategy='mean')\n",
    "combined_data_imputed = imputer.fit_transform(combined_data)\n",
    "\n",
    "# 分割数据集\n",
    "X_train, X_test, y_train, y_test = train_test_split(combined_data_imputed, anthraquinone_data, test_size=0.2, random_state=0)\n",
    "\n",
    "# 对数据进行标准化处理\n",
    "scaler = StandardScaler()\n",
    "X_train_scaled = scaler.fit_transform(X_train)\n",
    "X_test_scaled = scaler.transform(X_test)\n",
    "\n",
    "# 使用平均值填补y中的缺失值\n",
    "imputer_y = SimpleImputer(strategy='mean')\n",
    "y_train_imputed = imputer_y.fit_transform(y_train)\n",
    "y_test_imputed = imputer_y.transform(y_test)\n",
    "\n",
    "# 设计正交试验，选择最佳预处理方法\n",
    "preprocessing_methods = ['none', 'snv', 'msc']\n",
    "derivative_orders = [0, 1, 2]\n",
    "smoothing_windows = [0, 5, 11]\n",
    "\n",
    "best_r2 = 0\n",
    "best_method = None\n",
    "\n",
    "for pm in preprocessing_methods:\n",
    "    for do in derivative_orders:\n",
    "        for sw in smoothing_windows:\n",
    "            # 构建模型\n",
    "            lr = LinearRegression()\n",
    "            model = lr.fit(X_train_scaled, y_train_imputed)\n",
    "            \n",
    "            # 预测\n",
    "            y_pred = model.predict(X_test_scaled)\n",
    "            \n",
    "            # 计算R2\n",
    "            r2 = r2_score(y_test_imputed, y_pred)\n",
    "            \n",
    "            # 更新最佳结果\n",
    "            if r2 > best_r2:\n",
    "                best_r2 = r2\n",
    "                best_method = (pm, do, sw)\n",
    "\n",
    "print(\"最佳预处理方法：\", best_method)\n",
    "print(\"最佳R2：\", best_r2)\n",
    "\n",
    "# 使用最佳预处理方法建立PCR和PLS模型\n",
    "# 这里需要根据最佳预处理方法对数据进行预处理，然后建立模型，计算评价指标，比较模型效果\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "bc2ffa09",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PCR模型的R2： 0.9822045147581683\n",
      "PLS模型的R2： 0.9856835623972872\n",
      "PCR和PLS模型的R2比较：\n",
      "PCR R2: 0.9822045147581683\n",
      "PLS R2: 0.9856835623972872\n"
     ]
    }
   ],
   "source": [
    "#建立PCR模型\n",
    "pca = PCA(n_components=10) \n",
    "X_train_pca = pca.fit_transform(X_train_scaled) \n",
    "X_test_pca = pca.transform(X_test_scaled)\n",
    "lr_pcr = LinearRegression() \n",
    "model_pcr = lr_pcr.fit(X_train_pca, y_train_imputed) \n",
    "y_pred_pcr = model_pcr.predict(X_test_pca)\n",
    "#计算PCR模型的R2\n",
    "r2_pcr = r2_score(y_test_imputed, y_pred_pcr) \n",
    "print(\"PCR模型的R2：\", r2_pcr)\n",
    "#建立PLS模型\n",
    "pls = PLSRegression(n_components=10) \n",
    "model_pls = pls.fit(X_train_scaled, y_train_imputed) \n",
    "y_pred_pls = model_pls.predict(X_test_scaled)\n",
    "#计算PLS模型的R2\n",
    "r2_pls = r2_score(y_test_imputed, y_pred_pls) \n",
    "print(\"PLS模型的R2：\", r2_pls)\n",
    "#比较PCR和PLS模型的效果\n",
    "print('PCR和PLS模型的R2比较：') \n",
    "print('PCR R2:', r2_pcr) \n",
    "print('PLS R2:', r2_pls)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a89e503f",
   "metadata": {},
   "source": [
    "运行结果显示了PCR模型和PLS模型在测试集上的R²（确定系数）值：\n",
    "\n",
    "PCR模型的R²值为0.9822\n",
    "\n",
    "PLS模型的R²值为0.9857\n",
    "\n",
    "R²值是衡量模型拟合优度的一个重要指标，其值越接近1表示模型对数据的拟合程度越好。在这个例子中，PLS模型的R²值略高于PCR模型，说明PLS模型在这个特定的数据集上表现略好一些。总的来说，两种模型都取得了很高的R²值，表明它们对数据有很好的拟合能力，但PLS模型略占优势。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "d1974043",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "全波段RPD: -0.09791980209182763\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Found input variables with inconsistent numbers of samples: [403, 2418]",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[52], line 41\u001b[0m\n\u001b[0;32m     39\u001b[0m \u001b[38;5;66;03m# VIP特征选择\u001b[39;00m\n\u001b[0;32m     40\u001b[0m vip \u001b[38;5;241m=\u001b[39m SelectKBest(f_regression, k\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m20\u001b[39m)\n\u001b[1;32m---> 41\u001b[0m X_vip \u001b[38;5;241m=\u001b[39m \u001b[43mvip\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit_transform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_imputed\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     42\u001b[0m vip_rpd \u001b[38;5;241m=\u001b[39m build_model(X_vip, y_imputed)\n\u001b[0;32m     43\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mVIP特征选择RPD:\u001b[39m\u001b[38;5;124m\"\u001b[39m, vip_rpd)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\_set_output.py:157\u001b[0m, in \u001b[0;36m_wrap_method_output.<locals>.wrapped\u001b[1;34m(self, X, *args, **kwargs)\u001b[0m\n\u001b[0;32m    155\u001b[0m \u001b[38;5;129m@wraps\u001b[39m(f)\n\u001b[0;32m    156\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrapped\u001b[39m(\u001b[38;5;28mself\u001b[39m, X, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m--> 157\u001b[0m     data_to_wrap \u001b[38;5;241m=\u001b[39m \u001b[43mf\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    158\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(data_to_wrap, \u001b[38;5;28mtuple\u001b[39m):\n\u001b[0;32m    159\u001b[0m         \u001b[38;5;66;03m# only wrap the first output for cross decomposition\u001b[39;00m\n\u001b[0;32m    160\u001b[0m         return_tuple \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m    161\u001b[0m             _wrap_data_with_container(method, data_to_wrap[\u001b[38;5;241m0\u001b[39m], X, \u001b[38;5;28mself\u001b[39m),\n\u001b[0;32m    162\u001b[0m             \u001b[38;5;241m*\u001b[39mdata_to_wrap[\u001b[38;5;241m1\u001b[39m:],\n\u001b[0;32m    163\u001b[0m         )\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:919\u001b[0m, in \u001b[0;36mTransformerMixin.fit_transform\u001b[1;34m(self, X, y, **fit_params)\u001b[0m\n\u001b[0;32m    916\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfit(X, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mfit_params)\u001b[38;5;241m.\u001b[39mtransform(X)\n\u001b[0;32m    917\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    918\u001b[0m     \u001b[38;5;66;03m# fit method of arity 2 (supervised transformation)\u001b[39;00m\n\u001b[1;32m--> 919\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mfit_params\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mtransform(X)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:1152\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1145\u001b[0m     estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m   1147\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m   1148\u001b[0m     skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m   1149\u001b[0m         prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m   1150\u001b[0m     )\n\u001b[0;32m   1151\u001b[0m ):\n\u001b[1;32m-> 1152\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\feature_selection\\_univariate_selection.py:498\u001b[0m, in \u001b[0;36m_BaseFilter.fit\u001b[1;34m(self, X, y)\u001b[0m\n\u001b[0;32m    480\u001b[0m \u001b[38;5;129m@_fit_context\u001b[39m(prefer_skip_nested_validation\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m    481\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mfit\u001b[39m(\u001b[38;5;28mself\u001b[39m, X, y):\n\u001b[0;32m    482\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Run score function on (X, y) and get the appropriate features.\u001b[39;00m\n\u001b[0;32m    483\u001b[0m \n\u001b[0;32m    484\u001b[0m \u001b[38;5;124;03m    Parameters\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    496\u001b[0m \u001b[38;5;124;03m        Returns the instance itself.\u001b[39;00m\n\u001b[0;32m    497\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 498\u001b[0m     X, y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_data\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    499\u001b[0m \u001b[43m        \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsc\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[0;32m    500\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    502\u001b[0m     
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_check_params(X, y)\n\u001b[0;32m    503\u001b[0m     score_func_ret \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mscore_func(X, y)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:622\u001b[0m, in \u001b[0;36mBaseEstimator._validate_data\u001b[1;34m(self, X, y, reset, validate_separately, cast_to_ndarray, **check_params)\u001b[0m\n\u001b[0;32m    620\u001b[0m         y \u001b[38;5;241m=\u001b[39m check_array(y, input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mcheck_y_params)\n\u001b[0;32m    621\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 622\u001b[0m         X, y \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_X_y\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcheck_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    623\u001b[0m     out \u001b[38;5;241m=\u001b[39m X, y\n\u001b[0;32m    625\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m no_val_X \u001b[38;5;129;01mand\u001b[39;00m check_params\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mensure_2d\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mTrue\u001b[39;00m):\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:1164\u001b[0m, in \u001b[0;36mcheck_X_y\u001b[1;34m(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)\u001b[0m\n\u001b[0;32m   1146\u001b[0m X \u001b[38;5;241m=\u001b[39m check_array(\n\u001b[0;32m   1147\u001b[0m     X,\n\u001b[0;32m   1148\u001b[0m     accept_sparse\u001b[38;5;241m=\u001b[39maccept_sparse,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1159\u001b[0m     input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   1160\u001b[0m )\n\u001b[0;32m   1162\u001b[0m y \u001b[38;5;241m=\u001b[39m _check_y(y, multi_output\u001b[38;5;241m=\u001b[39mmulti_output, y_numeric\u001b[38;5;241m=\u001b[39my_numeric, estimator\u001b[38;5;241m=\u001b[39mestimator)\n\u001b[1;32m-> 1164\u001b[0m \u001b[43mcheck_consistent_length\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1166\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m X, y\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:407\u001b[0m, in \u001b[0;36mcheck_consistent_length\u001b[1;34m(*arrays)\u001b[0m\n\u001b[0;32m    405\u001b[0m uniques \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39munique(lengths)\n\u001b[0;32m    406\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(uniques) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m--> 407\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    408\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFound input variables with inconsistent numbers of samples: \u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    409\u001b[0m         \u001b[38;5;241m%\u001b[39m [\u001b[38;5;28mint\u001b[39m(l) \u001b[38;5;28;01mfor\u001b[39;00m l \u001b[38;5;129;01min\u001b[39;00m lengths]\n\u001b[0;32m    410\u001b[0m     )\n",
      "\u001b[1;31mValueError\u001b[0m: Found input variables with inconsistent numbers of samples: [403, 2418]"
     ]
    }
   ],
   "source": [
    "# 导入所需库\n",
    "from sklearn.feature_selection import f_regression, SelectKBest\n",
    "from sklearn.metrics import r2_score, mean_squared_error\n",
    "import numpy as np\n",
    "from matplotlib import pyplot as plt\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.svm import SVR\n",
    "\n",
    "# 准备数据\n",
    "X = combined_data_imputed\n",
    "y = anthraquinone_data.values.ravel()\n",
    "\n",
    "# 定义建模函数\n",
    "def build_model(X, y, n_splits=5):\n",
    "    kf = KFold(n_splits=n_splits)\n",
    "    rpd_scores = []\n",
    "    for train_index, test_index in kf.split(X):\n",
    "        X_train, X_test = X[train_index], X[test_index]\n",
    "        y_train, y_test = y[train_index], y[test_index]\n",
    "        \n",
    "        # 建立SVR模型\n",
    "        model = SVR(kernel='linear')\n",
    "        model.fit(X_train, y_train)\n",
    "        y_pred = model.predict(X_test)\n",
    "        \n",
    "        # 计算RPD值\n",
    "        rpd = 1.0 - (np.sqrt(mean_squared_error(y_test, y_pred)) / np.std(y_test))\n",
    "        rpd_scores.append(rpd)\n",
    "    return np.mean(rpd_scores)\n",
    "\n",
    "# 使用平均值填充y中的NaN值\n",
    "imputer_y = SimpleImputer(strategy='mean')\n",
    "y_imputed = imputer_y.fit_transform(y.reshape(-1, 1)).ravel()\n",
    "\n",
    "# 重新运行模型选择代码\n",
    "full_band_rpd = build_model(X, y_imputed)\n",
    "print(\"全波段RPD:\", full_band_rpd)\n",
    "\n",
    "# VIP特征选择\n",
    "vip = SelectKBest(f_regression, k=20)\n",
    "X_vip = vip.fit_transform(X, y_imputed)\n",
    "vip_rpd = build_model(X_vip, y_imputed)\n",
    "print(\"VIP特征选择RPD:\", vip_rpd)\n",
    "\n",
    "# 相关系数法特征选择\n",
    "cc = SelectKBest(lambda X, y: np.array(list(map(lambda x: np.corrcoef(x, y)[0, 1], X.T))), k=20)\n",
    "X_cc = cc.fit_transform(X, y_imputed)\n",
    "cc_rpd = build_model(X_cc, y_imputed)\n",
    "print(\"相关系数法特征选择RPD:\", cc_rpd)\n",
    "\n",
    "# 最佳建模区间选择方法\n",
    "best_rpd = max(full_band_rpd, vip_rpd, cc_rpd)\n",
    "best_method = \"全波段\" if best_rpd == full_band_rpd else (\"VIP\" if best_rpd == vip_rpd else \"相关系数法\")\n",
    "print(\"最佳建模区间选择方法：\", best_method)\n",
    "print(\"最佳RPD:\", best_rpd)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
