{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "257d19a5-d41c-4d9d-b9f5-0d9898b40835",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Device: cpu\n"
     ]
    }
   ],
   "source": [
     "import os, math, random\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "from typing import Tuple\n",
     "\n",
     "import torch\n",
     "import torch.nn as nn\n",
     "from torch.utils.data import Dataset, DataLoader\n",
     "from sklearn.model_selection import KFold\n",
     "from sklearn.preprocessing import StandardScaler\n",
     "\n",
     "# Seed every RNG used below for reproducibility (torch.manual_seed also seeds CUDA devices)\n",
     "SEED = 42\n",
     "random.seed(SEED); np.random.seed(SEED); torch.manual_seed(SEED)\n",
     "\n",
     "# Use the GPU when available, otherwise fall back to CPU\n",
     "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
     "print(\"Device:\", DEVICE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "26054c88-2dbf-42cd-994a-3d1c252697e0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X shape: (237, 17) y shape: (237, 1)\n"
     ]
    }
   ],
   "source": [
     "# Load the descriptor spreadsheet\n",
     "exp_file = \"Kraken monophosphine coordinates AD Descriptors.xlsx\"\n",
     "exp_sheet = \"AD reduced\"                                \n",
     "df = pd.read_excel(exp_file,exp_sheet, header = 0)\n",
     "# Drop the first 14 columns (presumably coordinate/ID metadata — confirm against the sheet)\n",
     "df = df.iloc[:,14:]\n",
     "\n",
     "# Feature columns: every remaining column except the regression target\n",
     "feature_cols = [c for c in df.columns if c != 'vbur_ratio_vbur_vtot']\n",
     "X = df[feature_cols].values.astype(np.float32)\n",
     "y = df['vbur_ratio_vbur_vtot'].values.astype(np.float32).reshape(-1, 1)\n",
     "\n",
     "print(\"X shape:\", X.shape, \"y shape:\", y.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "021ab2f9-7eed-43ca-b500-91643d22068c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# data loader\n",
    "class TabDataset(Dataset):\n",
    "    def __init__(self, X: np.ndarray, y: np.ndarray):\n",
    "        self.X = torch.from_numpy(X)\n",
    "        self.y = torch.from_numpy(y)\n",
    "    def __len__(self):\n",
    "        return len(self.X)\n",
    "    def __getitem__(self, idx):\n",
    "        return self.X[idx], self.y[idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "48fee569-45f1-4c2e-a957-155dd30d00b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 3. Model: Feature Attention + MLP + MC Dropout\n",
    "# - FeatureAttention：学习每个特征的权重（通道注意力，类似 Squeeze-Excitation 的简化版本）\n",
    "# - MLP 主干：两层感知机\n",
    "# - 输出：均值 mu(x) 与 log_var(x)（异方差高斯），训练用 NLL；预测可做 MC 采样估计不确定性\n",
    "\n",
    "# %%\n",
    "class FeatureAttention(nn.Module):\n",
    "# 对输入特征维度做注意力加权（通道注意力），轻量且适合表格特征。\n",
    "    def __init__(self, d_in: int, hidden: int = 32):\n",
    "        super().__init__()\n",
    "        self.net = nn.Sequential(\n",
    "            nn.Linear(d_in, hidden),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(hidden, d_in),\n",
    "            nn.Sigmoid()  # 输出[0,1]，作为每个特征的权重门控\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        # x: (B, d_in)\n",
    "        w = self.net(x)            # (B, d_in)\n",
    "        return x * w               # 特征逐维重加权"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9d11061d-dce9-4eef-939f-43e12ee2dcae",
   "metadata": {},
   "outputs": [],
   "source": [
    "class RegressorHet(nn.Module):\n",
    "# 带异方差输出的回归网络 + MC Dropout\n",
    "    def __init__(self, d_in: int, width: int = 64, p_drop: float = 0.2):\n",
    "        super().__init__()\n",
    "        self.attn = FeatureAttention(d_in, hidden=32)\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(d_in, width),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(p_drop),\n",
    "            nn.Linear(width, width),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(p_drop),\n",
    "        )\n",
    "        # 输出均值与 log_var\n",
    "        self.head_mu = nn.Linear(width, 1)\n",
    "        self.head_lv = nn.Linear(width, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.attn(x)\n",
    "        h = self.mlp(x)\n",
    "        mu = self.head_mu(h)\n",
    "        log_var = self.head_lv(h).clamp(min=-10, max=5)  # 稳定数值\n",
    "        return mu, log_var"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7630fe77-c5b3-4d89-8a21-002e15d12da6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def heteroscedastic_nll(y_true, mu, log_var):\n",
    "    #  采用 NLL 损失: 0.5*(log_var + (y-mu)^2/exp(log_var))\n",
    "    return 0.5 * (log_var + (y_true - mu)**2 / torch.exp(log_var))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ffc4bdfc-3e2d-4dce-b1af-49ae929d72c6",
   "metadata": {},
   "outputs": [],
   "source": [
     "# - Standardize features: fit the scaler on the training fold, transform-only on the validation fold\n",
     "\n",
     "def train_one_fold(X_tr, y_tr, X_val, y_val, epochs=300, batch_size=32, lr=1e-3, patience=30):\n",
     "    \"\"\"Train RegressorHet on one CV fold with NLL loss and early stopping on validation MAE.\n",
     "\n",
     "    Returns (model restored to its best weights, fitted StandardScaler, best val MAE).\n",
     "    \"\"\"\n",
     "    # Standardization (fit on the training split only, to avoid leakage)\n",
     "    scaler = StandardScaler().fit(X_tr)\n",
     "    X_tr_s = scaler.transform(X_tr)\n",
     "    X_val_s = scaler.transform(X_val)\n",
     "\n",
     "    ds_tr = TabDataset(X_tr_s, y_tr)\n",
     "    ds_val = TabDataset(X_val_s, y_val)\n",
     "    dl_tr = DataLoader(ds_tr, batch_size=batch_size, shuffle=True, drop_last=False)\n",
     "    dl_val = DataLoader(ds_val, batch_size=256, shuffle=False, drop_last=False)\n",
     "\n",
     "    model = RegressorHet(d_in=X_tr.shape[1], width=64, p_drop=0.2).to(DEVICE)\n",
     "    opt = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)\n",
     "\n",
     "    best_mae, best_state, wait = float('inf'), None, 0\n",
     "\n",
     "    for ep in range(1, epochs+1):\n",
     "        model.train()\n",
     "        tr_loss = 0.0\n",
     "        for xb, yb in dl_tr:\n",
     "            xb, yb = xb.to(DEVICE), yb.to(DEVICE)\n",
     "            mu, lv = model(xb)\n",
     "            loss = heteroscedastic_nll(yb, mu, lv).mean()\n",
     "            opt.zero_grad(); loss.backward(); opt.step()\n",
     "            tr_loss += loss.item() * len(xb)  # accumulate the sum; normalized below\n",
     "        tr_loss /= len(ds_tr)\n",
     "\n",
     "        # Validation (evaluate with the predictive mean only)\n",
     "        model.eval()\n",
     "        with torch.no_grad():\n",
     "            mu_list, y_list = [], []\n",
     "            for xb, yb in dl_val:\n",
     "                xb = xb.to(DEVICE)\n",
     "                mu, _ = model(xb)\n",
     "                mu_list.append(mu.cpu().numpy())\n",
     "                y_list.append(yb.numpy())\n",
     "        y_pred = np.vstack(mu_list).ravel()\n",
     "        y_true = np.vstack(y_list).ravel()\n",
     "        val_mae = np.mean(np.abs(y_true - y_pred))\n",
     "\n",
     "        # Track the best weights; require a small (1e-4) improvement to reset patience\n",
     "        if val_mae < best_mae - 1e-4:\n",
     "            best_mae = val_mae\n",
     "            best_state = {k: v.cpu().clone() for k, v in model.state_dict().items()}\n",
     "            wait = 0\n",
     "        else:\n",
     "            wait += 1\n",
     "\n",
     "        if ep % 25 == 0 or ep == 1:\n",
     "            print(f\"[Epoch {ep:03d}] train_nll={tr_loss:.4f}  val_MAE={val_mae:.4f} (best {best_mae:.4f})\")\n",
     "\n",
     "        if wait >= patience:\n",
     "            print(f\"Early stop at epoch {ep}, best val MAE={best_mae:.4f}\")\n",
     "            break\n",
     "\n",
     "    # NOTE(review): best_state stays None if val_MAE is never finite (e.g. NaN loss) — this line would then raise\n",
     "    model.load_state_dict(best_state)\n",
     "    return model, scaler, best_mae"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "207e5232-995f-4886-902e-a7aaa61bfad2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Epoch 001] train_nll=-0.0355  val_MAE=0.1437 (best 0.1437)\n",
      "[Epoch 025] train_nll=-1.4878  val_MAE=0.0420 (best 0.0314)\n",
      "[Epoch 050] train_nll=-2.3540  val_MAE=0.0394 (best 0.0208)\n",
      "[Epoch 075] train_nll=-2.6942  val_MAE=0.0216 (best 0.0208)\n",
      "Early stop at epoch 79, best val MAE=0.0208\n",
      "[Fold 1]  MAE=0.021  RMSE=0.026  R2=0.704\n",
      "[Epoch 001] train_nll=-0.0459  val_MAE=0.1875 (best 0.1875)\n",
      "[Epoch 025] train_nll=-1.4829  val_MAE=0.0609 (best 0.0422)\n",
      "[Epoch 050] train_nll=-2.0816  val_MAE=0.0491 (best 0.0348)\n",
      "[Epoch 075] train_nll=-2.6943  val_MAE=0.0282 (best 0.0234)\n",
      "[Epoch 100] train_nll=-2.6359  val_MAE=0.0274 (best 0.0202)\n",
      "[Epoch 125] train_nll=-2.9048  val_MAE=0.0254 (best 0.0202)\n",
      "[Epoch 150] train_nll=-3.2091  val_MAE=0.0210 (best 0.0180)\n",
      "[Epoch 175] train_nll=-3.2430  val_MAE=0.0249 (best 0.0169)\n",
      "Early stop at epoch 188, best val MAE=0.0169\n",
      "[Fold 2]  MAE=0.017  RMSE=0.029  R2=0.774\n",
      "[Epoch 001] train_nll=0.0762  val_MAE=0.2137 (best 0.2137)\n",
      "[Epoch 025] train_nll=-1.6473  val_MAE=0.0247 (best 0.0247)\n",
      "[Epoch 050] train_nll=-2.4697  val_MAE=0.0256 (best 0.0212)\n",
      "[Epoch 075] train_nll=-2.5992  val_MAE=0.0190 (best 0.0182)\n",
      "[Epoch 100] train_nll=-2.8605  val_MAE=0.0284 (best 0.0161)\n",
      "[Epoch 125] train_nll=-2.9996  val_MAE=0.0167 (best 0.0154)\n",
      "[Epoch 150] train_nll=-2.8609  val_MAE=0.0146 (best 0.0145)\n",
      "[Epoch 175] train_nll=-2.9559  val_MAE=0.0175 (best 0.0141)\n",
      "Early stop at epoch 184, best val MAE=0.0141\n",
      "[Fold 3]  MAE=0.014  RMSE=0.020  R2=0.757\n",
      "[Epoch 001] train_nll=-0.0180  val_MAE=0.1039 (best 0.1039)\n",
      "[Epoch 025] train_nll=-1.4365  val_MAE=0.0248 (best 0.0248)\n",
      "[Epoch 050] train_nll=-2.2548  val_MAE=0.0292 (best 0.0185)\n",
      "[Epoch 075] train_nll=-2.6851  val_MAE=0.0167 (best 0.0151)\n",
      "[Epoch 100] train_nll=-3.1070  val_MAE=0.0133 (best 0.0126)\n",
      "[Epoch 125] train_nll=-3.1846  val_MAE=0.0120 (best 0.0120)\n",
      "[Epoch 150] train_nll=-3.1630  val_MAE=0.0136 (best 0.0117)\n",
      "[Epoch 175] train_nll=-3.0807  val_MAE=0.0142 (best 0.0109)\n",
      "Early stop at epoch 183, best val MAE=0.0109\n",
      "[Fold 4]  MAE=0.011  RMSE=0.014  R2=0.888\n",
      "[Epoch 001] train_nll=-0.0759  val_MAE=0.0540 (best 0.0540)\n",
      "[Epoch 025] train_nll=-1.8870  val_MAE=0.0376 (best 0.0299)\n",
      "[Epoch 050] train_nll=-2.7103  val_MAE=0.0418 (best 0.0251)\n",
      "[Epoch 075] train_nll=-2.8627  val_MAE=0.0283 (best 0.0213)\n",
      "[Epoch 100] train_nll=-3.1743  val_MAE=0.0231 (best 0.0202)\n",
      "Early stop at epoch 107, best val MAE=0.0202\n",
      "[Fold 5]  MAE=0.020  RMSE=0.028  R2=0.804\n",
      "\n",
      "CV summary (mean ± std):\n",
      "MAE: 0.017 ± 0.004\n",
      "RMSE: 0.023 ± 0.006\n",
      "R2: 0.785 ± 0.061\n"
     ]
    }
   ],
   "source": [
    "# - 使用重复/多折交叉验证， KFold=5\n",
    "# - 记录每折 MAE / RMSE / R^2\n",
    "\n",
    "# %%\n",
    "def evaluate_metrics(y_true, y_pred):\n",
    "    mae = np.mean(np.abs(y_true - y_pred))\n",
    "    rmse = math.sqrt(np.mean((y_true - y_pred)**2))\n",
    "    # R^2\n",
    "    ss_res = np.sum((y_true - y_pred)**2)\n",
    "    ss_tot = np.sum((y_true - np.mean(y_true))**2)\n",
    "    r2 = 1 - ss_res/ss_tot if ss_tot > 0 else float('nan')\n",
    "    return mae, rmse, r2\n",
    "\n",
     "# 5-fold CV: train one model per fold, then report per-fold and summary metrics\n",
     "kf = KFold(n_splits=5, shuffle=True, random_state=SEED)\n",
     "fold_metrics = []\n",
     "\n",
     "for i, (tr_idx, val_idx) in enumerate(kf.split(X), 1):\n",
     "    X_tr, y_tr = X[tr_idx], y[tr_idx]\n",
     "    X_val, y_val = X[val_idx], y[val_idx]\n",
     "    model, scaler, best_mae = train_one_fold(X_tr, y_tr, X_val, y_val,\n",
     "                                             epochs=300, batch_size=32, lr=1e-3, patience=30)\n",
     "    # Predict on the held-out fold with the restored best model\n",
     "    X_val_s = scaler.transform(X_val)\n",
     "    with torch.no_grad():\n",
     "        model.eval()\n",
     "        mu, _ = model(torch.from_numpy(X_val_s).to(DEVICE))\n",
     "    y_pred = mu.cpu().numpy().ravel()\n",
     "    mae, rmse, r2 = evaluate_metrics(y_val.ravel(), y_pred)\n",
     "    fold_metrics.append((mae, rmse, r2))\n",
     "    print(f\"[Fold {i}]  MAE={mae:.3f}  RMSE={rmse:.3f}  R2={r2:.3f}\")\n",
     "\n",
     "print(\"\\nCV summary (mean ± std):\")\n",
     "for name, idx in zip([\"MAE\",\"RMSE\",\"R2\"], [0,1,2]):\n",
     "    arr = np.array([m[idx] for m in fold_metrics])\n",
     "    print(f\"{name}: {arr.mean():.3f} ± {arr.std():.3f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "093c16d4-1f55-4bbf-9e42-c5f1c6bac705",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Example mean/std: [0.18449293 0.1960745  0.20814781 0.19426695 0.21940102] [0.01144111 0.0127009  0.01149665 0.01395399 0.01759798]\n"
     ]
    }
   ],
   "source": [
    "# - 推理期启用 dropout：多次采样得到均值 & 标准差（作为不确定性指标）\n",
    "# - 给出 95% 置信区间示例（正态近似）\n",
    "\n",
    "def enable_dropout(model: nn.Module):\n",
    "\n",
    "    for m in model.modules():\n",
    "        if isinstance(m, nn.Dropout):\n",
    "            m.train()\n",
    "\n",
    "def mc_predict(model, scaler, X_pool, n_samples=50):\n",
    "    Xp = torch.from_numpy(scaler.transform(X_pool)).to(DEVICE)\n",
    "    model.eval()\n",
    "    enable_dropout(model)  # 开启MC\n",
    "    preds = []\n",
    "    with torch.no_grad():\n",
    "        for _ in range(n_samples):\n",
    "            mu, _ = model(Xp)\n",
    "            preds.append(mu.cpu().numpy())\n",
    "    preds = np.stack(preds, axis=0).squeeze(-1)  # (S, N)\n",
    "    mean = preds.mean(axis=0)\n",
    "    std = preds.std(axis=0)\n",
    "    return mean, std\n",
    "\n",
    "# 对全量数据做一次 MC 推理（实际使用中可对未实验的候选条件做排序）\n",
    "mean_pred, std_pred = mc_predict(model, scaler, X)\n",
    "print(\"Example mean/std:\", mean_pred[:5], std_pred[:5])\n",
    "\n",
    "# 计算 95% CI（正态近似）\n",
    "ci_low  = mean_pred - 1.95*std_pred\n",
    "ci_high = mean_pred + 1.95*std_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "30058967-41f0-41a8-8441-3b1689840747",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    0.188390\n",
       "1    0.218645\n",
       "2    0.220175\n",
       "3    0.203211\n",
       "4    0.212085\n",
       "Name: vbur_ratio_vbur_vtot, dtype: float64"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Peek at the first five target values (last column of df)\n",
     "df.iloc[:5,-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "3deda51e-7d41-4758-9c6d-f58a19c8bdeb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>idx</th>\n",
       "      <th>pred_mean</th>\n",
       "      <th>pred_std</th>\n",
       "      <th>score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>227</td>\n",
       "      <td>0.298023</td>\n",
       "      <td>0.044358</td>\n",
       "      <td>0.922145</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>20</td>\n",
       "      <td>0.336004</td>\n",
       "      <td>0.037360</td>\n",
       "      <td>0.900191</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>222</td>\n",
       "      <td>0.300502</td>\n",
       "      <td>0.041392</td>\n",
       "      <td>0.884930</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>232</td>\n",
       "      <td>0.311403</td>\n",
       "      <td>0.031392</td>\n",
       "      <td>0.764653</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>233</td>\n",
       "      <td>0.305626</td>\n",
       "      <td>0.030521</td>\n",
       "      <td>0.740390</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>75</td>\n",
       "      <td>0.291460</td>\n",
       "      <td>0.030620</td>\n",
       "      <td>0.712763</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>19</td>\n",
       "      <td>0.290725</td>\n",
       "      <td>0.028989</td>\n",
       "      <td>0.687991</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>203</td>\n",
       "      <td>0.236177</td>\n",
       "      <td>0.036449</td>\n",
       "      <td>0.682580</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>231</td>\n",
       "      <td>0.287147</td>\n",
       "      <td>0.028924</td>\n",
       "      <td>0.679727</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>12</td>\n",
       "      <td>0.285799</td>\n",
       "      <td>0.027432</td>\n",
       "      <td>0.655692</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>234</td>\n",
       "      <td>0.295837</td>\n",
       "      <td>0.025813</td>\n",
       "      <td>0.653177</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>71</td>\n",
       "      <td>0.243954</td>\n",
       "      <td>0.031652</td>\n",
       "      <td>0.630096</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>47</td>\n",
       "      <td>0.268516</td>\n",
       "      <td>0.027694</td>\n",
       "      <td>0.623996</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>67</td>\n",
       "      <td>0.277382</td>\n",
       "      <td>0.025078</td>\n",
       "      <td>0.604869</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>98</td>\n",
       "      <td>0.260314</td>\n",
       "      <td>0.025912</td>\n",
       "      <td>0.581770</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>6</td>\n",
       "      <td>0.266500</td>\n",
       "      <td>0.024867</td>\n",
       "      <td>0.579546</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>46</td>\n",
       "      <td>0.276547</td>\n",
       "      <td>0.023257</td>\n",
       "      <td>0.577180</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>187</td>\n",
       "      <td>0.250608</td>\n",
       "      <td>0.026814</td>\n",
       "      <td>0.574741</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>149</td>\n",
       "      <td>0.226127</td>\n",
       "      <td>0.029772</td>\n",
       "      <td>0.566750</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>139</td>\n",
       "      <td>0.272518</td>\n",
       "      <td>0.023090</td>\n",
       "      <td>0.566534</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>170</td>\n",
       "      <td>0.264859</td>\n",
       "      <td>0.023936</td>\n",
       "      <td>0.562910</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>200</td>\n",
       "      <td>0.247111</td>\n",
       "      <td>0.024408</td>\n",
       "      <td>0.533251</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>26</td>\n",
       "      <td>0.265946</td>\n",
       "      <td>0.021660</td>\n",
       "      <td>0.532680</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>125</td>\n",
       "      <td>0.261360</td>\n",
       "      <td>0.022141</td>\n",
       "      <td>0.530131</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>166</td>\n",
       "      <td>0.252384</td>\n",
       "      <td>0.023160</td>\n",
       "      <td>0.526272</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>51</td>\n",
       "      <td>0.249535</td>\n",
       "      <td>0.023027</td>\n",
       "      <td>0.518528</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>103</td>\n",
       "      <td>0.252560</td>\n",
       "      <td>0.022149</td>\n",
       "      <td>0.512206</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>132</td>\n",
       "      <td>0.259338</td>\n",
       "      <td>0.020699</td>\n",
       "      <td>0.505422</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>10</td>\n",
       "      <td>0.262368</td>\n",
       "      <td>0.019397</td>\n",
       "      <td>0.493068</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>76</td>\n",
       "      <td>0.240062</td>\n",
       "      <td>0.022508</td>\n",
       "      <td>0.491706</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    idx  pred_mean  pred_std     score\n",
       "0   227   0.298023  0.044358  0.922145\n",
       "1    20   0.336004  0.037360  0.900191\n",
       "2   222   0.300502  0.041392  0.884930\n",
       "3   232   0.311403  0.031392  0.764653\n",
       "4   233   0.305626  0.030521  0.740390\n",
       "5    75   0.291460  0.030620  0.712763\n",
       "6    19   0.290725  0.028989  0.687991\n",
       "7   203   0.236177  0.036449  0.682580\n",
       "8   231   0.287147  0.028924  0.679727\n",
       "9    12   0.285799  0.027432  0.655692\n",
       "10  234   0.295837  0.025813  0.653177\n",
       "11   71   0.243954  0.031652  0.630096\n",
       "12   47   0.268516  0.027694  0.623996\n",
       "13   67   0.277382  0.025078  0.604869\n",
       "14   98   0.260314  0.025912  0.581770\n",
       "15    6   0.266500  0.024867  0.579546\n",
       "16   46   0.276547  0.023257  0.577180\n",
       "17  187   0.250608  0.026814  0.574741\n",
       "18  149   0.226127  0.029772  0.566750\n",
       "19  139   0.272518  0.023090  0.566534\n",
       "20  170   0.264859  0.023936  0.562910\n",
       "21  200   0.247111  0.024408  0.533251\n",
       "22   26   0.265946  0.021660  0.532680\n",
       "23  125   0.261360  0.022141  0.530131\n",
       "24  166   0.252384  0.023160  0.526272\n",
       "25   51   0.249535  0.023027  0.518528\n",
       "26  103   0.252560  0.022149  0.512206\n",
       "27  132   0.259338  0.020699  0.505422\n",
       "28   10   0.262368  0.019397  0.493068\n",
       "29   76   0.240062  0.022508  0.491706"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# %%\n",
    "num_take = 30\n",
    "score = 0.5* ( (mean_pred - mean_pred.min())/(mean_pred.ptp()+1e-9) ) + \\\n",
    "        0.5* ( (std_pred  - std_pred.min()) /(std_pred.ptp()+1e-9)   )\n",
    "rank = np.argsort(-score)  # 倒序\n",
    "# rank = np.argsort(-mean_pred)\n",
    "suggest_top = rank[:num_take]\n",
    "pd.DataFrame({\n",
    "    \"idx\": suggest_top,\n",
    "    \"pred_mean\": mean_pred[suggest_top],\n",
    "    \"pred_std\": std_pred[suggest_top],\n",
    "    \"score\": score[suggest_top],\n",
    "}).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "72a16a91-2127-4320-b2f1-c9c0cd4afb07",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "203    0.152294\n",
      "149    0.166557\n",
      "98     0.243798\n",
      "76     0.244324\n",
      "71     0.253836\n",
      "200    0.253926\n",
      "6      0.257221\n",
      "139    0.262513\n",
      "51     0.263955\n",
      "10     0.268322\n",
      "46     0.269007\n",
      "132    0.273790\n",
      "125    0.274760\n",
      "26     0.275096\n",
      "103    0.275713\n",
      "47     0.278151\n",
      "231    0.278946\n",
      "187    0.279615\n",
      "12     0.281000\n",
      "166    0.281057\n",
      "67     0.286499\n",
      "170    0.296871\n",
      "234    0.300382\n",
      "222    0.310319\n",
      "19     0.314514\n",
      "232    0.332863\n",
      "75     0.338394\n",
      "233    0.348492\n",
      "227    0.382245\n",
      "20     0.405615\n",
      "Name: vbur_ratio_vbur_vtot, dtype: float64\n"
     ]
    }
   ],
   "source": [
     "# Ground-truth target values of the suggested candidates, sorted ascending\n",
     "aa = df.iloc[suggest_top,-1].sort_values()\n",
     "print(aa)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d217677e-0d13-4e8e-9077-1e1f6823a487",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Sort the full table by the target column (ascending)\n",
     "df_sorted = df.sort_values(by='vbur_ratio_vbur_vtot')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e92acce4-4d03-45c4-b738-2a3688cd9821",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "200    0.253926\n",
      "6      0.257221\n",
      "28     0.260941\n",
      "48     0.261828\n",
      "139    0.262513\n",
      "51     0.263955\n",
      "183    0.264737\n",
      "10     0.268322\n",
      "46     0.269007\n",
      "114    0.270344\n",
      "132    0.273790\n",
      "125    0.274760\n",
      "26     0.275096\n",
      "103    0.275713\n",
      "47     0.278151\n",
      "231    0.278946\n",
      "187    0.279615\n",
      "12     0.281000\n",
      "166    0.281057\n",
      "67     0.286499\n",
      "115    0.286708\n",
      "170    0.296871\n",
      "234    0.300382\n",
      "222    0.310319\n",
      "19     0.314514\n",
      "232    0.332863\n",
      "75     0.338394\n",
      "233    0.348492\n",
      "227    0.382245\n",
      "20     0.405615\n",
      "Name: vbur_ratio_vbur_vtot, dtype: float64\n"
     ]
    }
   ],
   "source": [
     "# The num_take largest target values in the dataset (the ground-truth top set)\n",
     "bb = df_sorted.iloc[:,-1][-num_take:]\n",
     "print (bb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "id": "73dfef10-e0c3-4485-b647-c31cc7b1fd11",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8333333333333334\n"
     ]
    }
   ],
   "source": [
     "# Fraction of suggested candidates that fall in the true top set\n",
     "# NOTE(review): sets are built from target VALUES, not row indices — relies on values being unique\n",
     "print(1-len(set(aa) - set(bb))/num_take)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "id": "6d3f4d71-9bad-474f-94ee-c889283ef93f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8333333333333334\n"
     ]
    }
   ],
   "source": [
     "# Same overlap measured from the other direction (symmetric when both sets have num_take elements)\n",
     "print(1-len(set(bb) - set(aa))/num_take)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "id": "4fd30c68-b32e-477f-b69c-76eefffbce6d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{28, 48, 114, 115, 183}"
      ]
     },
     "execution_count": 119,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# True-top entries missed by the suggestion list\n",
     "# NOTE(review): the saved output shows integer labels while bb holds float values — the stored result\n",
     "# looks like stale out-of-order kernel state (exec counts 117-119 vs 18-19); re-run to confirm\n",
     "set(bb) - set(aa)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59756248-e9b2-4744-9118-4bbfe1c2a0a8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
