{
 "cells": [
  {
   "cell_type": "raw",
   "id": "cc300e0d",
   "metadata": {},
   "source": [
    "遍历数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "c9981616",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "工作表名称: ['红外谱图', '蒽醌含量']\n",
      "工作表 '红外谱图' 的内容:\n",
      "  编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066   4049.78  \\\n",
      "0   1-1  1.264708  1.288542  1.333471  1.370217  1.352400  1.333578  1.371618   \n",
      "1   1-2  1.260399  1.293289  1.360664  1.370719  1.353501  1.321410  1.370489   \n",
      "2   1-3  1.218367  1.271432  1.299451  1.316458  1.310749  1.288309  1.347342   \n",
      "3   1-4  1.259917  1.279549  1.318026  1.315663  1.315034  1.294442  1.341085   \n",
      "4   1-5  1.227900  1.268811  1.324630  1.314813  1.320434  1.292731  1.337843   \n",
      "\n",
      "   4057.494  4065.208  ...  9935.461  9943.175  9950.889  9958.603  9966.316  \\\n",
      "0  1.414510  1.403367  ... -0.044855 -0.044209 -0.044844 -0.044485 -0.045000   \n",
      "1  1.404793  1.402450  ... -0.052916 -0.052215 -0.052981 -0.052385 -0.053302   \n",
      "2  1.396320  1.355759  ... -0.075619 -0.075195 -0.075577 -0.075273 -0.076240   \n",
      "3  1.376105  1.367473  ... -0.072897 -0.072135 -0.072650 -0.072386 -0.073184   \n",
      "4  1.386666  1.369949  ... -0.071002 -0.070456 -0.070898 -0.070672 -0.071474   \n",
      "\n",
      "    9974.03  9981.744  9989.458  9997.172  Unnamed: 779  \n",
      "0 -0.044582 -0.044907 -0.045498 -0.045294           NaN  \n",
      "1 -0.052861 -0.052986 -0.053608 -0.053561           NaN  \n",
      "2 -0.075761 -0.075899 -0.076142 -0.076000           NaN  \n",
      "3 -0.072785 -0.072895 -0.073353 -0.073166           NaN  \n",
      "4 -0.071201 -0.071496 -0.071783 -0.071296           NaN  \n",
      "\n",
      "[5 rows x 780 columns]\n",
      "工作表 '蒽醌含量' 的内容:\n",
      "  编号\\化合物       总蒽醌     芦荟大黄素       大黄酸       大黄素       大黄酚     大黄素甲醚\n",
      "0    1-1  6.385174  0.638627  2.028011  1.058405  1.753474  0.906657\n",
      "1    1-2  6.435919  0.675784  2.111518  1.098224  1.645855  0.904537\n",
      "2    1-3  6.623122  0.721373  2.340516  1.175284  1.655747  0.730202\n",
      "3    1-4  7.767245  0.841471  2.890464  1.368364  1.895988  0.770959\n",
      "4    1-5  9.247683  0.986471  3.523102  1.444994  2.186328  1.106788\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'  # 请确保文件路径正确\n",
    "\n",
    "# 使用ExcelFile类来读取Excel文件\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 获取所有工作表的名称\n",
    "sheet_names = xls.sheet_names\n",
    "print(\"工作表名称:\", sheet_names)\n",
    "\n",
    "# 遍历所有工作表并打印其内容\n",
    "for sheet_name in sheet_names:\n",
    "    # 读取每个工作表\n",
    "    sheet_data = pd.read_excel(xls, sheet_name)\n",
    "    print(f\"工作表 '{sheet_name}' 的内容:\")\n",
    "    print(sheet_data.head())  # 打印每个工作表的前几行"
   ]
  },
  {
   "cell_type": "raw",
   "id": "45681fc3",
   "metadata": {},
   "source": [
    "检查缺失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "73978621",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "编号\\波数             0\n",
      "4003.497          0\n",
      "4011.211          0\n",
      "4018.925          0\n",
      "4026.638          0\n",
      "               ... \n",
      "9974.03           0\n",
      "9981.744          0\n",
      "9989.458          0\n",
      "9997.172          0\n",
      "Unnamed: 779    372\n",
      "Length: 780, dtype: int64\n",
      "编号\\波数            0\n",
      "4003.497         0\n",
      "4011.211         0\n",
      "4018.925         0\n",
      "4026.638         0\n",
      "                ..\n",
      "9974.03          0\n",
      "9981.744         0\n",
      "9989.458         0\n",
      "9997.172         0\n",
      "Unnamed: 779    31\n",
      "Length: 780, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 读取“红外谱图”工作表\n",
    "sheet_name = '红外谱图'\n",
    "sheet_data = pd.read_excel(xls, sheet_name)\n",
    "\n",
    "# 检查缺失值\n",
    "missing_values = sheet_data.isnull().sum()\n",
    "\n",
    "# 打印缺失值统计\n",
    "print(missing_values)\n",
    "\n",
    "# 如果存在缺失值，进行插值处理\n",
    "if missing_values.sum() > 0:\n",
    "    # 使用线性插值法填充缺失值\n",
    "    sheet_data_interpolated = sheet_data.interpolate(method='linear', limit_direction='forward')\n",
    "    \n",
    "    # 再次检查缺失值\n",
    "    missing_values_interpolated = sheet_data_interpolated.isnull().sum()\n",
    "    \n",
    "    # 打印插值后的缺失值统计\n",
    "    print(missing_values_interpolated)\n",
    "    \n",
    "    # 可以选择将插值后的数据保存到新文件\n",
    "    sheet_data_interpolated.to_excel('F:\\\\研究\\\\红外数据处理\\\\插值后数据\\\\蒽醌在线提取数据_插值.xlsx', index=False)\n",
    "else:\n",
    "    print(\"“红外谱图”工作表中没有缺失值，无需插值处理。\")\n"
   ]
  },
  {
   "cell_type": "raw",
   "id": "43d048ab",
   "metadata": {},
   "source": [
    "删除缺失值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "332088b3",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "编号\\波数           0\n",
      "4003.497        0\n",
      "4011.211        0\n",
      "4018.925        0\n",
      "4026.638        0\n",
      "               ..\n",
      "9974.03         0\n",
      "9981.744        0\n",
      "9989.458        0\n",
      "9997.172        0\n",
      "Unnamed: 779    0\n",
      "Length: 780, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 读取“红外谱图”工作表\n",
    "sheet_name = '红外谱图'\n",
    "sheet_data = pd.read_excel(xls, sheet_name)\n",
    "\n",
    "# 删除含有缺失值的行\n",
    "sheet_data_cleaned = sheet_data.dropna(axis=0)\n",
    "\n",
    "# 再次检查缺失值\n",
    "missing_values_cleaned = sheet_data_cleaned.isnull().sum()\n",
    "\n",
    "# 打印清理后的缺失值统计\n",
    "print(missing_values_cleaned)\n",
    "\n",
    "# 保存清理后的数据到新文件\n",
    "sheet_data_cleaned.to_excel('F:\\\\研究\\\\红外数据处理\\\\清理后数据\\\\蒽醌在线提取数据_清理.xlsx', index=False)"
   ]
  },
  {
   "cell_type": "raw",
   "id": "9ff50d3e",
   "metadata": {},
   "source": [
    "对“蒽醌含量”工作表中的数据进行描述性统计分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "db1bc72f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              总蒽醌       芦荟大黄素         大黄酸         大黄素         大黄酚       大黄素甲醚\n",
      "count  403.000000  403.000000  403.000000  403.000000  403.000000  382.000000\n",
      "mean    66.617087    7.652840   22.889161   10.218328   20.748346    5.371562\n",
      "std     73.655321    9.596766   30.638461   10.521685   20.720696    5.353289\n",
      "min      6.058896    0.638627    1.025517    1.058405    1.600427    0.730202\n",
      "25%     26.030090    2.926961    5.302772    3.957964    8.712212    2.215962\n",
      "50%     40.682475    4.567647    8.809935    7.014078   14.629572    4.197750\n",
      "75%     65.265612    6.144755   23.766840   10.066539   24.870074    5.819826\n",
      "max    361.076461   51.397941  121.851630   50.652487  109.077068   29.280416\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# 读取“蒽醌含量”工作表\n",
    "sheet_name = '蒽醌含量'\n",
    "sheet_data = pd.read_excel(xls, sheet_name)\n",
    "\n",
    "# 进行描述性统计分析\n",
    "desc_stats = sheet_data.describe()\n",
    "\n",
    "# 打印描述性统计结果\n",
    "print(desc_stats)"
   ]
  },
  {
   "cell_type": "raw",
   "id": "16f764d7",
   "metadata": {},
   "source": [
    "显示了“蒽醌含量”工作表中各个蒽醌类化合物的含量分布情况。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "2e6b4da0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "    编号\\化合物         总蒽醌      芦荟大黄素         大黄酸        大黄素         大黄酚  \\\n",
      "0      1.1    6.385174   0.638627    2.028011   1.058405    1.753474   \n",
      "1      1.2    6.435919   0.675784    2.111518   1.098224    1.645855   \n",
      "2      1.3    6.623122   0.721373    2.340516   1.175284    1.655747   \n",
      "31     2.1    6.058896   0.651961    1.710466   1.314365    1.600427   \n",
      "69     3.8   77.190875   7.222255   34.469922  13.493415   18.374415   \n",
      "..     ...         ...        ...         ...        ...         ...   \n",
      "398  13.27  351.739649  50.152255  117.636767  49.611605  105.764896   \n",
      "399  13.28  357.211661  50.797549  119.403631  50.393377  107.641302   \n",
      "400  13.29  357.664204  51.212255  118.725766  50.149306  108.421558   \n",
      "401  13.30  360.312939  51.397941  120.127678  50.523147  109.050781   \n",
      "402  13.31  361.076461  51.396765  120.669725  50.652487  109.077068   \n",
      "\n",
      "         大黄素甲醚  anomaly_iforest  anomaly_ee  \n",
      "0     0.906657               -1           1  \n",
      "1     0.904537               -1           1  \n",
      "2     0.730202               -1           1  \n",
      "31    0.781678               -1           1  \n",
      "69    3.630869               -1           1  \n",
      "..         ...              ...         ...  \n",
      "398  28.574126               -1          -1  \n",
      "399  28.975802               -1          -1  \n",
      "400  29.155319               -1          -1  \n",
      "401  29.213391               -1          -1  \n",
      "402  29.280416               -1          -1  \n",
      "\n",
      "[67 rows x 9 columns]\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.ensemble import IsolationForest\n",
    "from sklearn.covariance import EllipticEnvelope\n",
    "from sklearn.impute import SimpleImputer  # 注意这里的变化\n",
    "\n",
    "# 加载Excel文件\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "sheet_name = '蒽醌含量'\n",
    "sheet_data = pd.read_excel(file_path, sheet_name, dtype={'编号\\\\化合物': str})\n",
    "\n",
    "# 将'编号\\化合物'列的'几-几'转换为'几.几'\n",
    "sheet_data['编号\\\\化合物'] = sheet_data['编号\\\\化合物'].str.replace('-', '.')\n",
    "\n",
    "# 数据标准化\n",
    "scaler = StandardScaler()\n",
    "# 使用SimpleImputer处理缺失值\n",
    "imputer = SimpleImputer(strategy='mean')  # 使用平均值来插补缺失值\n",
    "sheet_data_imputed = imputer.fit_transform(sheet_data.drop(columns=['编号\\\\化合物']))  # 插补缺失值并排除非数值列\n",
    "sheet_data_scaled = scaler.fit_transform(sheet_data_imputed)  # 标准化数据\n",
    "\n",
    "# 使用Isolation Forest进行异常值检测\n",
    "iforest = IsolationForest()\n",
    "sheet_data_iforest = iforest.fit_predict(sheet_data_scaled)\n",
    "\n",
    "# 使用EllipticEnvelope进行异常值检测\n",
    "ee = EllipticEnvelope()\n",
    "sheet_data_ee = ee.fit_predict(sheet_data_scaled)\n",
    "\n",
    "# 合并检测结果，得到最终的异常值\n",
    "sheet_data['anomaly_iforest'] = sheet_data_iforest\n",
    "sheet_data['anomaly_ee'] = sheet_data_ee\n",
    "\n",
    "# 选用所有方法都认为是异常值的样本\n",
    "anomalies = sheet_data[(sheet_data['anomaly_iforest'] == -1) | (sheet_data['anomaly_ee'] == -1)]\n",
    "\n",
    "# 打印异常值样本\n",
    "print(anomalies)\n",
    "\n",
    "# 如果需要，可以删除或标记异常值\n",
    "sheet_data_cleaned = sheet_data.drop(anomalies.index)\n",
    "sheet_data_cleaned.to_excel('F:\\\\研究\\\\红外数据处理\\\\清理后数据\\\\蒽醌含量_清理.xlsx', index=False)"
   ]
  },
  {
   "cell_type": "raw",
   "id": "2eca98b4",
   "metadata": {},
   "source": [
    "显示了异常值样本的相关信息，包括编号、化合物、各成分含量、以及异常值检测结果。\n",
    "异常值检测结果是基于两种方法：IsolationForest 和 EllipticEnvelope。每行数据的最后一列为 anomaly_iforest 和 anomaly_ee，分别表示两种方法对每个样本的检测结果。其中，-1 表示样本被检测为异常值。\n",
    "在代码中，异常值的定义是所有方法都认为是异常值的样本。根据输出结果，共有67个样本被检测为异常值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "420c8a27",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取异常值样本的索引\n",
    "anomalies_index = anomalies.index\n",
    "\n",
    "# 删除异常值样本\n",
    "sheet_data_cleaned = sheet_data.drop(anomalies_index)\n",
    "\n",
    "# 重新保存清理后的数据到Excel文件\n",
    "sheet_data_cleaned.to_excel('F:\\\\研究\\\\红外数据处理\\\\清理后数据\\\\蒽醌含量_清理.xlsx', index=False)"
   ]
  },
  {
   "cell_type": "raw",
   "id": "867f0770",
   "metadata": {},
   "source": [
    "考虑对“红外谱图”工作表进行数据预处理，例如标准化、归一化等。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "3984f59a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.preprocessing import StandardScaler, MinMaxScaler\n",
    "\n",
    "# 加载红外谱图数据\n",
    "file_path = 'F:\\\\研究\\\\红外数据处理\\\\原始数据\\\\蒽醌在线提取数据.xlsx'\n",
    "sheet_name = '红外谱图'\n",
    "sheet_data = pd.read_excel(file_path, sheet_name)\n",
    "\n",
    "# 将'编号\\波数'列的'几-几'转换为'几.几'\n",
    "sheet_data['编号\\\\波数'] = sheet_data['编号\\\\波数'].str.replace('-', '.')\n",
    "\n",
    "# 提取特征列和标签列\n",
    "features = sheet_data.iloc[:, :-1]\n",
    "labels = sheet_data.iloc[:, -1]\n",
    "\n",
    "# 将 features 的列名称转换为字符串类型\n",
    "features.columns = features.columns.astype(str)\n",
    "\n",
    "# 标准化特征\n",
    "scaler = StandardScaler()\n",
    "features_scaled = scaler.fit_transform(features)\n",
    "\n",
    "# 归一化特征\n",
    "normalizer = MinMaxScaler()\n",
    "features_normalized = normalizer.fit_transform(features)\n",
    "\n",
    "# 将处理后的特征和标签重新组合\n",
    "features_scaled_df = pd.DataFrame(features_scaled, columns=features.columns)\n",
    "labels_df = pd.DataFrame(labels, columns=['label'])\n",
    "sheet_data_scaled = pd.concat([features_scaled_df, labels_df], axis=1)\n",
    "\n",
    "features_normalized_df = pd.DataFrame(features_normalized, columns=features.columns)\n",
    "sheet_data_normalized = pd.concat([features_normalized_df, labels_df], axis=1)\n",
    "\n",
    "# 保存标准化和归一化后的数据\n",
    "sheet_data_scaled.to_excel('F:\\\\研究\\\\红外数据处理\\\\预处理后数据\\\\蒽醌在线提取数据_标准化.xlsx', index=False)\n",
    "sheet_data_normalized.to_excel('F:\\\\研究\\\\红外数据处理\\\\预处理后数据\\\\蒽醌在线提取数据_归一化.xlsx', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "7e62e331",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "处理后的数据：\n",
      "  编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066   4049.78  \\\n",
      "0   1.1  1.264708  1.288542  1.333471  1.370217  1.352400  1.333578  1.371618   \n",
      "1   1.2  1.260399  1.293289  1.360664  1.370719  1.353501  1.321410  1.370489   \n",
      "2   1.3  1.218367  1.271432  1.299451  1.316458  1.310749  1.288309  1.347342   \n",
      "3   1.4  1.259917  1.279549  1.318026  1.315663  1.315034  1.294442  1.341085   \n",
      "4   1.5  1.227900  1.268811  1.324630  1.314813  1.320434  1.292731  1.337843   \n",
      "\n",
      "   4057.494  4065.208  ...  9927.747  9935.461  9943.175  9950.889  9958.603  \\\n",
      "0  1.414510  1.403367  ... -0.043580 -0.044855 -0.044209 -0.044844 -0.044485   \n",
      "1  1.404793  1.402450  ... -0.051729 -0.052916 -0.052215 -0.052981 -0.052385   \n",
      "2  1.396320  1.355759  ... -0.074564 -0.075619 -0.075195 -0.075577 -0.075273   \n",
      "3  1.376105  1.367473  ... -0.071837 -0.072897 -0.072135 -0.072650 -0.072386   \n",
      "4  1.386666  1.369949  ... -0.070067 -0.071002 -0.070456 -0.070898 -0.070672   \n",
      "\n",
      "   9966.316   9974.03  9981.744  9989.458  9997.172  \n",
      "0 -0.045000 -0.044582 -0.044907 -0.045498 -0.045294  \n",
      "1 -0.053302 -0.052861 -0.052986 -0.053608 -0.053561  \n",
      "2 -0.076240 -0.075761 -0.075899 -0.076142 -0.076000  \n",
      "3 -0.073184 -0.072785 -0.072895 -0.073353 -0.073166  \n",
      "4 -0.071474 -0.071201 -0.071496 -0.071783 -0.071296  \n",
      "\n",
      "[5 rows x 779 columns]\n",
      "\n",
      "标准化后的特征数据：\n",
      "      编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066  \\\n",
      "0 -1.652153 -0.175096 -0.259583 -0.155697  0.359376  0.416867  0.076303   \n",
      "1 -1.625465 -0.304315 -0.120504  0.651225  0.374358  0.450208 -0.275983   \n",
      "2 -1.598777 -1.564784 -0.760877 -1.165202 -1.244949 -0.844449 -1.234320   \n",
      "3 -1.572089 -0.318770 -0.523063 -0.614009 -1.268674 -0.714687 -1.056758   \n",
      "4 -1.545401 -1.278905 -0.837668 -0.418043 -1.294041 -0.551159 -1.106295   \n",
      "\n",
      "    4049.78  4057.494  4065.208  ...  9927.747  9935.461  9943.175  9950.889  \\\n",
      "0  0.026648  0.217535  0.330637  ...  2.609830  2.601031  2.611197  2.586417   \n",
      "1 -0.006492 -0.063358  0.304173  ...  1.696323  1.698176  1.716118  1.673320   \n",
      "2 -0.685935 -0.308291 -1.043299  ... -0.863197 -0.844629 -0.852875 -0.862318   \n",
      "3 -0.869600 -0.892654 -0.705241  ... -0.557598 -0.539755 -0.510850 -0.533841   \n",
      "4 -0.964763 -0.587363 -0.633785  ... -0.359130 -0.327527 -0.323075 -0.337235   \n",
      "\n",
      "   9958.603  9966.316   9974.03  9981.744  9989.458  9997.172  \n",
      "0  2.580771  2.595379  2.600127  2.596499  2.581280  2.552763  \n",
      "1  1.697312  1.667873  1.678736  1.698169  1.675512  1.635795  \n",
      "2 -0.862157 -0.894554 -0.869908 -0.849741 -0.841353 -0.852967  \n",
      "3 -0.539313 -0.553146 -0.538741 -0.515641 -0.529826 -0.538643  \n",
      "4 -0.347607 -0.362129 -0.362393 -0.360064 -0.354483 -0.331261  \n",
      "\n",
      "[5 rows x 779 columns]\n",
      "\n",
      "标准化后的完整数据：\n",
      "      编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066  \\\n",
      "0 -1.652153 -0.175096 -0.259583 -0.155697  0.359376  0.416867  0.076303   \n",
      "1 -1.625465 -0.304315 -0.120504  0.651225  0.374358  0.450208 -0.275983   \n",
      "2 -1.598777 -1.564784 -0.760877 -1.165202 -1.244949 -0.844449 -1.234320   \n",
      "3 -1.572089 -0.318770 -0.523063 -0.614009 -1.268674 -0.714687 -1.056758   \n",
      "4 -1.545401 -1.278905 -0.837668 -0.418043 -1.294041 -0.551159 -1.106295   \n",
      "\n",
      "    4049.78  4057.494  4065.208  ...  9935.461  9943.175  9950.889  9958.603  \\\n",
      "0  0.026648  0.217535  0.330637  ...  2.601031  2.611197  2.586417  2.580771   \n",
      "1 -0.006492 -0.063358  0.304173  ...  1.698176  1.716118  1.673320  1.697312   \n",
      "2 -0.685935 -0.308291 -1.043299  ... -0.844629 -0.852875 -0.862318 -0.862157   \n",
      "3 -0.869600 -0.892654 -0.705241  ... -0.539755 -0.510850 -0.533841 -0.539313   \n",
      "4 -0.964763 -0.587363 -0.633785  ... -0.327527 -0.323075 -0.337235 -0.347607   \n",
      "\n",
      "   9966.316   9974.03  9981.744  9989.458  9997.172  label  \n",
      "0  2.595379  2.600127  2.596499  2.581280  2.552763    NaN  \n",
      "1  1.667873  1.678736  1.698169  1.675512  1.635795    NaN  \n",
      "2 -0.894554 -0.869908 -0.849741 -0.841353 -0.852967    NaN  \n",
      "3 -0.553146 -0.538741 -0.515641 -0.529826 -0.538643    NaN  \n",
      "4 -0.362129 -0.362393 -0.360064 -0.354483 -0.331261    NaN  \n",
      "\n",
      "[5 rows x 780 columns]\n"
     ]
    }
   ],
   "source": [
    "print(\"\\n处理后的数据：\")\n",
    "print(features.head())\n",
    "\n",
    "print(\"\\n标准化后的特征数据：\")\n",
    "print(features_scaled_df.head())\n",
    "\n",
    "print(\"\\n标准化后的完整数据：\")\n",
    "print(sheet_data_scaled.head())"
   ]
  },
  {
   "cell_type": "raw",
   "id": "992b1717",
   "metadata": {},
   "source": [
    "选择建模特征，建立机器学习模型，并进行交叉验证。\n",
    "使用模型进行预测，并评估模型的性能。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "bf71f0f5",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Input y contains NaN.",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[44], line 14\u001b[0m\n\u001b[0;32m     11\u001b[0m \u001b[38;5;66;03m# 选择与标签最相关的特征\u001b[39;00m\n\u001b[0;32m     12\u001b[0m \u001b[38;5;66;03m# 假设最后一列是标签，如果不是，请根据实际情况调整\u001b[39;00m\n\u001b[0;32m     13\u001b[0m selector \u001b[38;5;241m=\u001b[39m SelectKBest(f_regression, k\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m)\n\u001b[1;32m---> 14\u001b[0m \u001b[43mselector\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43msheet_data_scaled\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdrop\u001b[49m\u001b[43m(\u001b[49m\u001b[43msheet_data_scaled\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcolumns\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msheet_data_scaled\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43miloc\u001b[49m\u001b[43m[\u001b[49m\u001b[43m:\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m-\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     15\u001b[0m selected_features \u001b[38;5;241m=\u001b[39m sheet_data_scaled\u001b[38;5;241m.\u001b[39mcolumns[selector\u001b[38;5;241m.\u001b[39mget_support()]\n\u001b[0;32m     17\u001b[0m \u001b[38;5;66;03m# 输出选出的特征\u001b[39;00m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:1152\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1145\u001b[0m     estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m   1147\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m   1148\u001b[0m     skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m   1149\u001b[0m         prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m   1150\u001b[0m     )\n\u001b[0;32m   1151\u001b[0m ):\n\u001b[1;32m-> 1152\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\feature_selection\\_univariate_selection.py:498\u001b[0m, in \u001b[0;36m_BaseFilter.fit\u001b[1;34m(self, X, y)\u001b[0m\n\u001b[0;32m    480\u001b[0m \u001b[38;5;129m@_fit_context\u001b[39m(prefer_skip_nested_validation\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m    481\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mfit\u001b[39m(\u001b[38;5;28mself\u001b[39m, X, y):\n\u001b[0;32m    482\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"Run score function on (X, y) and get the appropriate features.\u001b[39;00m\n\u001b[0;32m    483\u001b[0m \n\u001b[0;32m    484\u001b[0m \u001b[38;5;124;03m    Parameters\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    496\u001b[0m \u001b[38;5;124;03m        Returns the instance itself.\u001b[39;00m\n\u001b[0;32m    497\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 498\u001b[0m     X, y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_data\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    499\u001b[0m \u001b[43m        \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsc\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[0;32m    500\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    502\u001b[0m     
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_check_params(X, y)\n\u001b[0;32m    503\u001b[0m     score_func_ret \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mscore_func(X, y)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:622\u001b[0m, in \u001b[0;36mBaseEstimator._validate_data\u001b[1;34m(self, X, y, reset, validate_separately, cast_to_ndarray, **check_params)\u001b[0m\n\u001b[0;32m    620\u001b[0m         y \u001b[38;5;241m=\u001b[39m check_array(y, input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mcheck_y_params)\n\u001b[0;32m    621\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 622\u001b[0m         X, y \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_X_y\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcheck_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    623\u001b[0m     out \u001b[38;5;241m=\u001b[39m X, y\n\u001b[0;32m    625\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m no_val_X \u001b[38;5;129;01mand\u001b[39;00m check_params\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mensure_2d\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mTrue\u001b[39;00m):\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:1162\u001b[0m, in \u001b[0;36mcheck_X_y\u001b[1;34m(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)\u001b[0m\n\u001b[0;32m   1142\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m   1143\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m requires y to be passed, but the target y is None\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1144\u001b[0m     )\n\u001b[0;32m   1146\u001b[0m X \u001b[38;5;241m=\u001b[39m check_array(\n\u001b[0;32m   1147\u001b[0m     X,\n\u001b[0;32m   1148\u001b[0m     accept_sparse\u001b[38;5;241m=\u001b[39maccept_sparse,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1159\u001b[0m     input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   1160\u001b[0m )\n\u001b[1;32m-> 1162\u001b[0m y \u001b[38;5;241m=\u001b[39m \u001b[43m_check_y\u001b[49m\u001b[43m(\u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmulti_output\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_numeric\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43my_numeric\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mestimator\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1164\u001b[0m check_consistent_length(X, y)\n\u001b[0;32m   1166\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m X, y\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:1172\u001b[0m, in \u001b[0;36m_check_y\u001b[1;34m(y, multi_output, y_numeric, estimator)\u001b[0m\n\u001b[0;32m   1170\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Isolated part of check_X_y dedicated to y validation\"\"\"\u001b[39;00m\n\u001b[0;32m   1171\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m multi_output:\n\u001b[1;32m-> 1172\u001b[0m     y \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_array\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   1173\u001b[0m \u001b[43m        \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1174\u001b[0m \u001b[43m        \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1175\u001b[0m \u001b[43m        \u001b[49m\u001b[43mforce_all_finite\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1176\u001b[0m \u001b[43m        \u001b[49m\u001b[43mensure_2d\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1177\u001b[0m \u001b[43m        \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1178\u001b[0m \u001b[43m        \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43my\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1179\u001b[0m \u001b[43m        \u001b[49m\u001b[43mestimator\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1180\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1181\u001b[0m 
\u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m   1182\u001b[0m     estimator_name \u001b[38;5;241m=\u001b[39m _check_estimator_name(estimator)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:957\u001b[0m, in \u001b[0;36mcheck_array\u001b[1;34m(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)\u001b[0m\n\u001b[0;32m    951\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    952\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFound array with dim \u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m expected <= 2.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    953\u001b[0m             \u001b[38;5;241m%\u001b[39m (array\u001b[38;5;241m.\u001b[39mndim, estimator_name)\n\u001b[0;32m    954\u001b[0m         )\n\u001b[0;32m    956\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m force_all_finite:\n\u001b[1;32m--> 957\u001b[0m         \u001b[43m_assert_all_finite\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    958\u001b[0m \u001b[43m            \u001b[49m\u001b[43marray\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    959\u001b[0m \u001b[43m            \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    960\u001b[0m \u001b[43m            \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    961\u001b[0m \u001b[43m            \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_all_finite\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mallow-nan\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    962\u001b[0m \u001b[43m        
\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    964\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ensure_min_samples \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m    965\u001b[0m     n_samples \u001b[38;5;241m=\u001b[39m _num_samples(array)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:122\u001b[0m, in \u001b[0;36m_assert_all_finite\u001b[1;34m(X, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m    119\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m first_pass_isfinite:\n\u001b[0;32m    120\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m--> 122\u001b[0m \u001b[43m_assert_all_finite_element_wise\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    123\u001b[0m \u001b[43m    \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    124\u001b[0m \u001b[43m    \u001b[49m\u001b[43mxp\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mxp\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    125\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mallow_nan\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    126\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmsg_dtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmsg_dtype\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    127\u001b[0m \u001b[43m    \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    128\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    129\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:171\u001b[0m, in \u001b[0;36m_assert_all_finite_element_wise\u001b[1;34m(X, xp, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m    154\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m estimator_name \u001b[38;5;129;01mand\u001b[39;00m input_name \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m has_nan_error:\n\u001b[0;32m    155\u001b[0m     \u001b[38;5;66;03m# Improve the error message on how to handle missing values in\u001b[39;00m\n\u001b[0;32m    156\u001b[0m     \u001b[38;5;66;03m# scikit-learn.\u001b[39;00m\n\u001b[0;32m    157\u001b[0m     msg_err \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m    158\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not accept missing values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    159\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m encoded as NaN natively. For supervised learning, you might want\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    169\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m#estimators-that-handle-nan-values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    170\u001b[0m     )\n\u001b[1;32m--> 171\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(msg_err)\n",
      "\u001b[1;31mValueError\u001b[0m: Input y contains NaN."
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.feature_selection import SelectKBest\n",
    "from sklearn.feature_selection import f_regression\n",
    "\n",
    "# Load the preprocessed (standardized) spectral data; the last column is the label\n",
    "sheet_data_scaled = pd.read_excel('F:\\\\研究\\\\红外数据处理\\\\预处理后数据\\\\蒽醌在线提取数据_标准化.xlsx', na_values=['NA', ' '])\n",
    "\n",
    "# Fill NaN with column means. numeric_only=True keeps non-numeric columns\n",
    "# (e.g. the sample-ID column) from breaking .mean() on pandas >= 2.0.\n",
    "# Reassign instead of inplace=True so the cell is idempotent on re-run.\n",
    "sheet_data_scaled = sheet_data_scaled.fillna(sheet_data_scaled.mean(numeric_only=True))\n",
    "\n",
    "# Split features / label (label assumed to be the last column)\n",
    "feature_cols = sheet_data_scaled.columns[:-1]\n",
    "X = sheet_data_scaled[feature_cols]\n",
    "y = sheet_data_scaled.iloc[:, -1]\n",
    "\n",
    "# Keep the k features most correlated with the label\n",
    "selector = SelectKBest(f_regression, k=10)\n",
    "selector.fit(X, y)\n",
    "\n",
    "# BUG FIX: get_support() returns a mask of length n_features, so it must\n",
    "# index the feature columns only — the original indexed the full column\n",
    "# set (features + label), shifting every selected name by one.\n",
    "selected_features = feature_cols[selector.get_support()]\n",
    "\n",
    "print(\"选出的特征：\")\n",
    "print(selected_features)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "cc56c23e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "工作表名称: ['红外谱图', '蒽醌含量']\n",
      "工作表 '红外谱图' 的内容:\n",
      "  编号\\波数  4003.497  4011.211  4018.925  4026.638  4034.352  4042.066   4049.78  \\\n",
      "0   1-1  1.264708  1.288542  1.333471  1.370217  1.352400  1.333578  1.371618   \n",
      "1   1-2  1.260399  1.293289  1.360664  1.370719  1.353501  1.321410  1.370489   \n",
      "2   1-3  1.218367  1.271432  1.299451  1.316458  1.310749  1.288309  1.347342   \n",
      "3   1-4  1.259917  1.279549  1.318026  1.315663  1.315034  1.294442  1.341085   \n",
      "4   1-5  1.227900  1.268811  1.324630  1.314813  1.320434  1.292731  1.337843   \n",
      "\n",
      "   4057.494  4065.208  ...  9927.747  9935.461  9943.175  9950.889  9958.603  \\\n",
      "0  1.414510  1.403367  ... -0.043580 -0.044855 -0.044209 -0.044844 -0.044485   \n",
      "1  1.404793  1.402450  ... -0.051729 -0.052916 -0.052215 -0.052981 -0.052385   \n",
      "2  1.396320  1.355759  ... -0.074564 -0.075619 -0.075195 -0.075577 -0.075273   \n",
      "3  1.376105  1.367473  ... -0.071837 -0.072897 -0.072135 -0.072650 -0.072386   \n",
      "4  1.386666  1.369949  ... -0.070067 -0.071002 -0.070456 -0.070898 -0.070672   \n",
      "\n",
      "   9966.316   9974.03  9981.744  9989.458  9997.172  \n",
      "0 -0.045000 -0.044582 -0.044907 -0.045498 -0.045294  \n",
      "1 -0.053302 -0.052861 -0.052986 -0.053608 -0.053561  \n",
      "2 -0.076240 -0.075761 -0.075899 -0.076142 -0.076000  \n",
      "3 -0.073184 -0.072785 -0.072895 -0.073353 -0.073166  \n",
      "4 -0.071474 -0.071201 -0.071496 -0.071783 -0.071296  \n",
      "\n",
      "[5 rows x 779 columns]\n",
      "工作表 '蒽醌含量' 的内容:\n",
      "  编号\\化合物       总蒽醌     芦荟大黄素       大黄酸       大黄素       大黄酚     大黄素甲醚\n",
      "0    1-1  6.385174  0.638627  2.028011  1.058405  1.753474  0.906657\n",
      "1    1-2  6.435919  0.675784  2.111518  1.098224  1.645855  0.904537\n",
      "2    1-3  6.623122  0.721373  2.340516  1.175284  1.655747  0.730202\n",
      "3    1-4  7.767245  0.841471  2.890464  1.368364  1.895988  0.770959\n",
      "4    1-5  9.247683  0.986471  3.523102  1.444994  2.186328  1.106788\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Workbook holding the raw on-line extraction data\n",
    "file_path = 'F:\\\\研究\\\\蒽醌在线提取数据.xlsx'  # make sure this path is correct\n",
    "\n",
    "# Open the workbook once so every sheet can be read from the same handle\n",
    "xls = pd.ExcelFile(file_path)\n",
    "\n",
    "# List the worksheets it contains\n",
    "sheet_names = xls.sheet_names\n",
    "print(\"工作表名称:\", sheet_names)\n",
    "\n",
    "# Read each worksheet and show a preview of its first rows\n",
    "previews = {name: pd.read_excel(xls, name) for name in sheet_names}\n",
    "for sheet_name, sheet_data in previews.items():\n",
    "    print(f\"工作表 '{sheet_name}' 的内容:\")\n",
    "    print(sheet_data.head())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "5abcce5e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Mean Squared Error: 9.039294784869442\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "# Load both sheets of the workbook\n",
    "file_path = 'F:\\\\研究\\\\蒽醌在线提取数据.xlsx'\n",
    "data_ir = pd.read_excel(file_path, sheet_name='红外谱图')  # IR spectra\n",
    "data_content = pd.read_excel(file_path, sheet_name='蒽醌含量')  # anthraquinone content\n",
    "\n",
    "# Use the sample-ID column as the index on both frames.\n",
    "# BUG FIX: the column names contain a literal backslash; escape it\n",
    "# explicitly ('\\\\\\\\') — the original '\\\\波'/'\\\\化' invalid escapes raise a\n",
    "# SyntaxWarning on Python >= 3.12 (same runtime string either way).\n",
    "# Reassign instead of inplace=True so the cell is idempotent on re-run.\n",
    "data_ir = data_ir.set_index('编号\\\\\\\\波数')\n",
    "data_content = data_content.set_index('编号\\\\\\\\化合物')\n",
    "\n",
    "# Features (X): spectra; targets (y): all six content columns (multi-output)\n",
    "X = data_ir\n",
    "y = data_content\n",
    "\n",
    "# Train/test split\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Standardize features — fit on the training fold only to avoid leakage\n",
    "scaler = StandardScaler()\n",
    "X_train = scaler.fit_transform(X_train)\n",
    "X_test = scaler.transform(X_test)\n",
    "\n",
    "# RandomForestRegressor handles multi-output targets natively\n",
    "model = RandomForestRegressor(n_estimators=100, random_state=42)\n",
    "model.fit(X_train, y_train)\n",
    "\n",
    "# Evaluate on the held-out fold\n",
    "y_pred = model.predict(X_test)\n",
    "mse = mean_squared_error(y_test, y_pred)\n",
    "print(f'Mean Squared Error: {mse}')\n",
    "\n",
    "# Further tuning: try other hyperparameters or algorithms based on this score\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "895b75da",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "Input y contains NaN.",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[54], line 26\u001b[0m\n\u001b[0;32m     24\u001b[0m \u001b[38;5;66;03m# 使用线性回归模型进行训练和预测\u001b[39;00m\n\u001b[0;32m     25\u001b[0m lr \u001b[38;5;241m=\u001b[39m LinearRegression()\n\u001b[1;32m---> 26\u001b[0m \u001b[43mlr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     27\u001b[0m y_pred \u001b[38;5;241m=\u001b[39m lr\u001b[38;5;241m.\u001b[39mpredict(X_test)\n\u001b[0;32m     29\u001b[0m \u001b[38;5;66;03m# 评估模型性能\u001b[39;00m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:1152\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1145\u001b[0m     estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m   1147\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m   1148\u001b[0m     skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m   1149\u001b[0m         prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m   1150\u001b[0m     )\n\u001b[0;32m   1151\u001b[0m ):\n\u001b[1;32m-> 1152\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\linear_model\\_base.py:678\u001b[0m, in \u001b[0;36mLinearRegression.fit\u001b[1;34m(self, X, y, sample_weight)\u001b[0m\n\u001b[0;32m    674\u001b[0m n_jobs_ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_jobs\n\u001b[0;32m    676\u001b[0m accept_sparse \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpositive \u001b[38;5;28;01melse\u001b[39;00m [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcsr\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcsc\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcoo\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m--> 678\u001b[0m X, y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_validate_data\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    679\u001b[0m \u001b[43m    \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maccept_sparse\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_numeric\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\n\u001b[0;32m    680\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    682\u001b[0m has_sw \u001b[38;5;241m=\u001b[39m sample_weight \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m    683\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_sw:\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\base.py:622\u001b[0m, in \u001b[0;36mBaseEstimator._validate_data\u001b[1;34m(self, X, y, reset, validate_separately, cast_to_ndarray, **check_params)\u001b[0m\n\u001b[0;32m    620\u001b[0m         y \u001b[38;5;241m=\u001b[39m check_array(y, input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mcheck_y_params)\n\u001b[0;32m    621\u001b[0m     \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 622\u001b[0m         X, y \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_X_y\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcheck_params\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    623\u001b[0m     out \u001b[38;5;241m=\u001b[39m X, y\n\u001b[0;32m    625\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m no_val_X \u001b[38;5;129;01mand\u001b[39;00m check_params\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mensure_2d\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mTrue\u001b[39;00m):\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:1162\u001b[0m, in \u001b[0;36mcheck_X_y\u001b[1;34m(X, y, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, multi_output, ensure_min_samples, ensure_min_features, y_numeric, estimator)\u001b[0m\n\u001b[0;32m   1142\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m   1143\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m requires y to be passed, but the target y is None\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1144\u001b[0m     )\n\u001b[0;32m   1146\u001b[0m X \u001b[38;5;241m=\u001b[39m check_array(\n\u001b[0;32m   1147\u001b[0m     X,\n\u001b[0;32m   1148\u001b[0m     accept_sparse\u001b[38;5;241m=\u001b[39maccept_sparse,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   1159\u001b[0m     input_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   1160\u001b[0m )\n\u001b[1;32m-> 1162\u001b[0m y \u001b[38;5;241m=\u001b[39m \u001b[43m_check_y\u001b[49m\u001b[43m(\u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmulti_output\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmulti_output\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_numeric\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43my_numeric\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mestimator\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1164\u001b[0m check_consistent_length(X, y)\n\u001b[0;32m   1166\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m X, y\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:1172\u001b[0m, in \u001b[0;36m_check_y\u001b[1;34m(y, multi_output, y_numeric, estimator)\u001b[0m\n\u001b[0;32m   1170\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Isolated part of check_X_y dedicated to y validation\"\"\"\u001b[39;00m\n\u001b[0;32m   1171\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m multi_output:\n\u001b[1;32m-> 1172\u001b[0m     y \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_array\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m   1173\u001b[0m \u001b[43m        \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1174\u001b[0m \u001b[43m        \u001b[49m\u001b[43maccept_sparse\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcsr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1175\u001b[0m \u001b[43m        \u001b[49m\u001b[43mforce_all_finite\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1176\u001b[0m \u001b[43m        \u001b[49m\u001b[43mensure_2d\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1177\u001b[0m \u001b[43m        \u001b[49m\u001b[43mdtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m   1178\u001b[0m \u001b[43m        \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43my\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1179\u001b[0m \u001b[43m        \u001b[49m\u001b[43mestimator\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m   1180\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1181\u001b[0m 
\u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m   1182\u001b[0m     estimator_name \u001b[38;5;241m=\u001b[39m _check_estimator_name(estimator)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:957\u001b[0m, in \u001b[0;36mcheck_array\u001b[1;34m(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)\u001b[0m\n\u001b[0;32m    951\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    952\u001b[0m             \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFound array with dim \u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m expected <= 2.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    953\u001b[0m             \u001b[38;5;241m%\u001b[39m (array\u001b[38;5;241m.\u001b[39mndim, estimator_name)\n\u001b[0;32m    954\u001b[0m         )\n\u001b[0;32m    956\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m force_all_finite:\n\u001b[1;32m--> 957\u001b[0m         \u001b[43m_assert_all_finite\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    958\u001b[0m \u001b[43m            \u001b[49m\u001b[43marray\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    959\u001b[0m \u001b[43m            \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    960\u001b[0m \u001b[43m            \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    961\u001b[0m \u001b[43m            \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_all_finite\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mallow-nan\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    962\u001b[0m \u001b[43m        
\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    964\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ensure_min_samples \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m    965\u001b[0m     n_samples \u001b[38;5;241m=\u001b[39m _num_samples(array)\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:122\u001b[0m, in \u001b[0;36m_assert_all_finite\u001b[1;34m(X, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m    119\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m first_pass_isfinite:\n\u001b[0;32m    120\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m--> 122\u001b[0m \u001b[43m_assert_all_finite_element_wise\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    123\u001b[0m \u001b[43m    \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    124\u001b[0m \u001b[43m    \u001b[49m\u001b[43mxp\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mxp\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    125\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_nan\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mallow_nan\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    126\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmsg_dtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmsg_dtype\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    127\u001b[0m \u001b[43m    \u001b[49m\u001b[43mestimator_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mestimator_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    128\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minput_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    129\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:171\u001b[0m, in \u001b[0;36m_assert_all_finite_element_wise\u001b[1;34m(X, xp, allow_nan, msg_dtype, estimator_name, input_name)\u001b[0m\n\u001b[0;32m    154\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m estimator_name \u001b[38;5;129;01mand\u001b[39;00m input_name \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mX\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m has_nan_error:\n\u001b[0;32m    155\u001b[0m     \u001b[38;5;66;03m# Improve the error message on how to handle missing values in\u001b[39;00m\n\u001b[0;32m    156\u001b[0m     \u001b[38;5;66;03m# scikit-learn.\u001b[39;00m\n\u001b[0;32m    157\u001b[0m     msg_err \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m    158\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mestimator_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m does not accept missing values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    159\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m encoded as NaN natively. For supervised learning, you might want\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    169\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m#estimators-that-handle-nan-values\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    170\u001b[0m     )\n\u001b[1;32m--> 171\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(msg_err)\n",
      "\u001b[1;31mValueError\u001b[0m: Input y contains NaN."
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from sklearn.impute import SimpleImputer\n",
    "\n",
    "# merged_data must already exist (built by the earlier load/merge cell)\n",
    "\n",
    "# Targets: the six anthraquinone content columns; features: everything else\n",
    "target_cols = ['总蒽醌', '芦荟大黄素', '大黄酸', '大黄素', '大黄酚', '大黄素甲醚']\n",
    "X = merged_data.drop(columns=target_cols)\n",
    "y = merged_data[target_cols]\n",
    "\n",
    "# scikit-learn requires string column names\n",
    "X.columns = X.columns.astype(str)\n",
    "\n",
    "# BUG FIX: the imputer only fixed NaN in X, but y also contained NaN,\n",
    "# which is what raised 'ValueError: Input y contains NaN' in lr.fit().\n",
    "# Drop rows with any missing target and keep X aligned before imputing.\n",
    "valid_rows = y.notna().all(axis=1)\n",
    "X = X.loc[valid_rows]\n",
    "y = y.loc[valid_rows]\n",
    "\n",
    "# Fill remaining feature gaps with column means\n",
    "imputer = SimpleImputer(strategy='mean')\n",
    "X_imputed = imputer.fit_transform(X)\n",
    "\n",
    "# Train/test split\n",
    "X_train, X_test, y_train, y_test = train_test_split(X_imputed, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Fit and predict with ordinary least squares (multi-output)\n",
    "lr = LinearRegression()\n",
    "lr.fit(X_train, y_train)\n",
    "y_pred = lr.predict(X_test)\n",
    "\n",
    "# Evaluate\n",
    "mse = mean_squared_error(y_test, y_pred)\n",
    "r2 = r2_score(y_test, y_pred)\n",
    "\n",
    "print(f\"MSE: {mse}\")\n",
    "print(f\"R2 Score: {r2}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "296a859a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(31, 780) (382, 6)\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Found input variables with inconsistent numbers of samples: [31, 382]",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[58], line 30\u001b[0m\n\u001b[0;32m     27\u001b[0m \u001b[38;5;28mprint\u001b[39m(X\u001b[38;5;241m.\u001b[39mshape, y\u001b[38;5;241m.\u001b[39mshape)\n\u001b[0;32m     29\u001b[0m \u001b[38;5;66;03m# 划分数据集\u001b[39;00m\n\u001b[1;32m---> 30\u001b[0m X_train, X_test, y_train, y_test \u001b[38;5;241m=\u001b[39m \u001b[43mtrain_test_split\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtest_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrandom_state\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m42\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m     32\u001b[0m \u001b[38;5;66;03m# 使用线性回归模型进行训练和预测\u001b[39;00m\n\u001b[0;32m     33\u001b[0m lr \u001b[38;5;241m=\u001b[39m LinearRegression()\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\_param_validation.py:214\u001b[0m, in \u001b[0;36mvalidate_params.<locals>.decorator.<locals>.wrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m    208\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    209\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m    210\u001b[0m         skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m    211\u001b[0m             prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m    212\u001b[0m         )\n\u001b[0;32m    213\u001b[0m     ):\n\u001b[1;32m--> 214\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    215\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m InvalidParameterError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    216\u001b[0m     \u001b[38;5;66;03m# When the function is just a wrapper around an estimator, we allow\u001b[39;00m\n\u001b[0;32m    217\u001b[0m     \u001b[38;5;66;03m# the function to delegate validation to the estimator, but we replace\u001b[39;00m\n\u001b[0;32m    218\u001b[0m     \u001b[38;5;66;03m# the name of the estimator by the name of the function in the error\u001b[39;00m\n\u001b[0;32m    219\u001b[0m     \u001b[38;5;66;03m# message to avoid confusion.\u001b[39;00m\n\u001b[0;32m    220\u001b[0m     msg \u001b[38;5;241m=\u001b[39m re\u001b[38;5;241m.\u001b[39msub(\n\u001b[0;32m    221\u001b[0m         \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mparameter of \u001b[39m\u001b[38;5;124m\\\u001b[39m\u001b[38;5;124mw+ must be\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m   
 222\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mparameter of \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__qualname__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m must be\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m    223\u001b[0m         \u001b[38;5;28mstr\u001b[39m(e),\n\u001b[0;32m    224\u001b[0m     )\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\model_selection\\_split.py:2646\u001b[0m, in \u001b[0;36mtrain_test_split\u001b[1;34m(test_size, train_size, random_state, shuffle, stratify, *arrays)\u001b[0m\n\u001b[0;32m   2643\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m n_arrays \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m   2644\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAt least one array required as input\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m-> 2646\u001b[0m arrays \u001b[38;5;241m=\u001b[39m \u001b[43mindexable\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43marrays\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   2648\u001b[0m n_samples \u001b[38;5;241m=\u001b[39m _num_samples(arrays[\u001b[38;5;241m0\u001b[39m])\n\u001b[0;32m   2649\u001b[0m n_train, n_test \u001b[38;5;241m=\u001b[39m _validate_shuffle_split(\n\u001b[0;32m   2650\u001b[0m     n_samples, test_size, train_size, default_test_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.25\u001b[39m\n\u001b[0;32m   2651\u001b[0m )\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:453\u001b[0m, in \u001b[0;36mindexable\u001b[1;34m(*iterables)\u001b[0m\n\u001b[0;32m    434\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Make arrays indexable for cross-validation.\u001b[39;00m\n\u001b[0;32m    435\u001b[0m \n\u001b[0;32m    436\u001b[0m \u001b[38;5;124;03mChecks consistent length, passes through None, and ensures that everything\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    449\u001b[0m \u001b[38;5;124;03m    sparse matrix, or dataframe) or `None`.\u001b[39;00m\n\u001b[0;32m    450\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    452\u001b[0m result \u001b[38;5;241m=\u001b[39m [_make_indexable(X) \u001b[38;5;28;01mfor\u001b[39;00m X \u001b[38;5;129;01min\u001b[39;00m iterables]\n\u001b[1;32m--> 453\u001b[0m \u001b[43mcheck_consistent_length\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mresult\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    454\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result\n",
      "File \u001b[1;32m~\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\sklearn\\utils\\validation.py:407\u001b[0m, in \u001b[0;36mcheck_consistent_length\u001b[1;34m(*arrays)\u001b[0m\n\u001b[0;32m    405\u001b[0m uniques \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39munique(lengths)\n\u001b[0;32m    406\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(uniques) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m--> 407\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    408\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFound input variables with inconsistent numbers of samples: \u001b[39m\u001b[38;5;132;01m%r\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    409\u001b[0m         \u001b[38;5;241m%\u001b[39m [\u001b[38;5;28mint\u001b[39m(l) \u001b[38;5;28;01mfor\u001b[39;00m l \u001b[38;5;129;01min\u001b[39;00m lengths]\n\u001b[0;32m    410\u001b[0m     )\n",
      "\u001b[1;31mValueError\u001b[0m: Found input variables with inconsistent numbers of samples: [31, 382]"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.metrics import mean_squared_error, r2_score\n",
    "from sklearn.impute import SimpleImputer\n",
    "\n",
    "# merged_data must already exist (built by the earlier load/merge cell)\n",
    "\n",
    "# Targets: the six anthraquinone content columns; features: everything else\n",
    "target_cols = ['总蒽醌', '芦荟大黄素', '大黄酸', '大黄素', '大黄酚', '大黄素甲醚']\n",
    "X = merged_data.drop(columns=target_cols)\n",
    "y = merged_data[target_cols]\n",
    "\n",
    "# scikit-learn requires string column names\n",
    "X.columns = X.columns.astype(str)\n",
    "\n",
    "# Keep only sample IDs present in both frames\n",
    "common_idx = X.index.intersection(y.index)\n",
    "X = X.loc[common_idx]\n",
    "y = y.loc[common_idx]\n",
    "\n",
    "# BUG FIX: calling X.dropna() and y.dropna() independently removed\n",
    "# different rows from each frame, producing the recorded error\n",
    "# 'inconsistent numbers of samples: [31, 382]'. Build ONE mask over\n",
    "# both frames so the same rows are dropped from each.\n",
    "complete_rows = X.notna().all(axis=1) & y.notna().all(axis=1)\n",
    "X = X.loc[complete_rows]\n",
    "y = y.loc[complete_rows]\n",
    "\n",
    "# Sanity check: sample counts must now match\n",
    "print(X.shape, y.shape)\n",
    "\n",
    "# Train/test split\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# Fit and predict with ordinary least squares (multi-output)\n",
    "lr = LinearRegression()\n",
    "lr.fit(X_train, y_train)\n",
    "y_pred = lr.predict(X_test)\n",
    "\n",
    "# Evaluate\n",
    "mse = mean_squared_error(y_test, y_pred)\n",
    "r2 = r2_score(y_test, y_pred)\n",
    "\n",
    "print(f\"MSE: {mse}\")\n",
    "print(f\"R2 Score: {r2}\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
