{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "#Numpy==1.23.3,pandas==1.4.4,matplotlib==3.6.0,Glasspy==0.4.0\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import glasspy as gp\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Load the full SciGlass database shipped with GlassPy.\n",
     "from glasspy.data import SciGlass\n",
     "source = SciGlass()\n",
     "# Raw, unfiltered table; columns are a two-level MultiIndex of\n",
     "# (category, name) — see the category breakdown in the next cell.\n",
     "Raw_data = source.data\n",
     "Raw_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "print(\"Data Size:\", Raw_data.shape)\n",
    "print(\"Data Categories:\", list(Raw_data.columns.levels[0]))\n",
    "CountData=[]\n",
    "for Cate in Raw_data.columns.levels[0]:\n",
    "    columns = Raw_data[Cate].columns\n",
    "    CountData+=[[Cate, list(columns), len(columns)]]\n",
    "CountData = pd.DataFrame(CountData, columns=[\"Category\", \"Columns\", \"Number\"])\n",
    "CountData"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "print(\"Total number of missing values in compounds: \", Raw_data[\"compounds\"].isnull().sum().sum())\n",
    "print(\"Total number of missing values in elements: \", Raw_data[\"elements\"].isnull().sum().sum())\n",
    "print(\"Total number of missing values in property: \", Raw_data[\"property\"].isnull().sum().sum())\n",
    "print(\"Total number of missing values in metadata: \", Raw_data[\"metadata\"].isnull().sum().sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# How sparse the property block is: rows with no measured property at\n",
     "# all, and rows carrying exactly one measured property.\n",
     "print(\"Number of samples missing all properties: \", (Raw_data[\"property\"].isnull()).all(axis=1).sum())\n",
     "print(\"Number of samples with only one property: \", ((~Raw_data[\"property\"].isnull()).sum(axis=1)==1).sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Sanity check that the labels slated for removal are already absent\n",
     "# from the raw tables (both set intersections empty -> True printed).\n",
     "# NOTE(review): \"R2O3\" appears twice in P1 — harmless here because P1 is\n",
     "# only used through set(P1), but it looks like a copy-paste slip.\n",
     "P1=[\"\",\"Al2O3+Fe2O3\",\"MoO3+WO3\",\"CaO+MgO\",\"FeO+Fe2O3\",\"Li2O+Na2O+K2O\",\"Na2O+K2O\",\"F2O-1\",\"FemOn\",\"HF+H2O\",\"R2O\",\"R2O3\",\"R2O3\",\"RO\",\"RmOn\",]\n",
     "P2=[\"Ac\",\"Am\",\"Ar\",\"At\",\"Bk\",\"Cf\",\"Cm\",\"Es\",\"Fm\",\"Fr\",\"He\",\"Kr\",\"Ne\",\"Np\",\"Pa\",\"Pm\",\"Po\",\"Pu\",\"Ra\",\"Rn\",\"Xe\",]\n",
     "print(\"Already excluded part 1: \", len(set(Raw_data[\"compounds\"].columns)&set(P1))==0)\n",
     "print(\"Already excluded part 2: \", len(set(Raw_data[\"elements\"].columns)&set(P2))==0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "removed_compounds = [\"\",\"Al2O3+Fe2O3\",\"MoO3+WO3\",\"CaO+MgO\",\"FeO+Fe2O3\",\"Li2O+Na2O+K2O\",\"Na2O+K2O\",\"F2O-1\",\"FemOn\",\"HF+H2O\",\"R2O\",\"R2O3\",\"R2O3\",\"RO\",\"RmOn\",]\n",
    "removed_elements = [\"Ac\",\"Am\",\"Ar\",\"At\",\"Bk\",\"Cf\",\"Cm\",\"Es\",\"Fm\",\"Fr\",\"He\",\"Kr\",\"Ne\",\"Np\",\"Pa\",\"Pm\",\"Po\",\"Pu\",\"Ra\",\"Rn\",\"Th\",\"U\",\"Xe\",]\n",
    "targets = list(Raw_data[\"property\"].columns)\n",
    "propconf = {\"keep\": targets}\n",
    "compconf = {\"acceptable_sum_deviation\": 1,\n",
    "            \"final_sum\": 1,\n",
    "            \"return_weight\": False,\n",
    "            \"dropline\": removed_compounds,\n",
    "            \"drop_compound_with_element\": removed_elements,\n",
    "           }\n",
    "sg = SciGlass(False, propconf, compconf)\n",
    "sg.data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "CountData = []\n",
    "for Cate in sg.data.columns.levels[0]:\n",
    "    columns = sg.data[Cate].columns\n",
    "    CountData+=[[Cate, list(columns), len(columns)]]\n",
    "CountData = pd.DataFrame(CountData, columns = [\"Category\", \"Columns\", \"Number\"])\n",
    "CountData"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Verify renormalization: every row's compound fractions should sum to 1\n",
     "# within floating-point tolerance.\n",
     "Sum_property = sg.data[\"compounds\"].sum(axis=1)\n",
     "print(\"Number of abnormal samples: \", (np.abs(Sum_property-1.0)>1e-6).sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "#Maximum and minimum values of properties\n",
    "treatment = { \"AbbeNum\": {\"max\": 115,},\n",
    "              \"Cp293K\": {\"max\": 2000,},\n",
    "              \"Cp473K\": {\"max\": 2000,},\n",
    "              \"Cp673K\": {\"max\": 3000,},\n",
    "              \"Cp1073K\": {\"min\": 500,\"max\": 2500,},\n",
    "              \"Cp1273K\": {\"min\": 500,\"max\": 3000,},\n",
    "              \"Cp1473K\": {\"min\": 500,\"max\": 3000,},\n",
    "              \"Cp1673K\": {\"min\": 500,\"max\": 2250,},\n",
    "              \"CTE328K\": {\"min\": 10**-6.5,\"log\": True,},\n",
    "              \"CTE373K\": {\"min\": 10**-6.5,\"log\": True,},\n",
    "              \"CTE433K\": {\"min\": 10**-8,\"log\": True,},\n",
    "              \"CTE483K\": {\"min\": 10**-7,\"log\": True,},\n",
    "              \"CTE623K\": {\"min\": 10**-6.5,\"log\": True,},\n",
    "              \"CTEbelowTg\": {\"min\": 0,\"log\": True,},\n",
    "              \"Density293K\": {\"min\": 1, \"max\": 10,},\n",
    "              \"CTE623K\": {\"log\": True,},\n",
    "              \"MaxGrowthVelocity\": {\"min\": 1e-10,\"log\": True,},\n",
    "              \"MeanDispersion\": {\"log\": True,},\n",
    "              \"Microhardness\": {\"max\": 15,},\n",
    "              \"Permittivity\": {\"max\": 50,},\n",
    "              \"PoissonRatio\": {\"min\": 0,\"max\": 1,},\n",
    "              \"RefractiveIndex\": {\"max\": 4,},\n",
    "              \"RefractiveIndexHigh\": {\"min\": 1.7,\"max\":3.5,},\n",
    "              \"Resistivity273K\": {\"log\": True,\"max\": 1e40,},\n",
    "              \"Resistivity373K\": {\"log\": True,\"max\": 1e28,},\n",
    "              \"Resistivity423K\": {\"log\": True,},\n",
    "              \"Resistivity573K\": {\"log\": True,},\n",
    "              \"Resistivity1073K\": {\"max\": 10**4,\"log\": True,},\n",
    "              \"Resistivity1273K\": {\"max\": 10**5,\"log\": True,},\n",
    "              \"Resistivity1473K\": {\"log\": True,},\n",
    "              \"Resistivity1673K\": {\"log\": True,},\n",
    "              \"SurfaceTension1473K\": {\"max\": 0.5,},\n",
    "              \"SurfaceTension1573K\": {\"max\": 0.7,},\n",
    "              \"SurfaceTension1673K\": {\"max\": 0.7,},\n",
    "              \"SurfaceTensionAboveTg\": {\"max\": 0.8,},\n",
    "              \"Viscosity773K\": {\"log\": True,},\n",
    "              \"Viscosity873K\": {\"log\": True,},\n",
    "              \"Viscosity973K\": {\"log\": True,},\n",
    "              \"Viscosity1073K\": {\"log\": True,},\n",
    "              \"Viscosity1173K\": {\"log\": True,},\n",
    "              \"Viscosity1273K\": {\"log\": True,},\n",
    "              \"Viscosity1373K\": {\"log\": True,},\n",
    "              \"Viscosity1473K\": {\"log\": True,},\n",
    "              \"Viscosity1573K\": {\"log\": True,},\n",
    "              \"Viscosity1673K\": {\"log\": True,},\n",
    "              \"Viscosity1773K\": {\"max\": 10**10,\"log\": True,},\n",
    "              \"Viscosity1873K\": {\"max\": 10**10,\"log\": True,},\n",
    "              \"Viscosity2073K\": {\"max\": 10**8,\"log\": True,},\n",
    "              \"Viscosity2273K\": {\"max\": 10**8,\"log\": True,},\n",
    "              \"Viscosity2473K\": {\"log\": True,},\n",
    "              \"T3\": {\"max\": 2350,},\n",
    "              \"T4\": {\"max\": 2000,},\n",
    "              \"TangentOfLossAngle\": {\"min\": 1e-4,\"max\": 0.16,\"log\": True,},\n",
    "              \"ThermalConductivity\": {\"max\": 6,},\n",
    "              \"TresistivityIs1MOhm.m\": {\"max\": 2000,},\n",
    "              \"YoungModulus\": {\"max\": 175,},\n",
    "              \"Tsoft\": {\"max\": 1600,},}\n",
    "\n",
    "min_cols = [ col for col in treatment if treatment[col].get(\"min\", None) is not None ]\n",
    "max_cols = [ col for col in treatment if treatment[col].get(\"max\", None) is not None ]\n",
    "log_cols = [ (\"property\",col) for col in treatment if treatment[col].get(\"log\", False) ]\n",
    "\n",
    "#Exclude abnormal values\n",
    "for col in min_cols:\n",
    "    logic = sg.data[\"property\"][col] < treatment[col][\"min\"]\n",
    "    sg.data.loc[logic, (\"property\", col)] = np.nan\n",
    "for col in max_cols:\n",
    "    logic = sg.data[\"property\"][col] > treatment[col][\"max\"]\n",
    "    sg.data.loc[logic, (\"property\", col)] = np.nan\n",
    "sg.data[log_cols] = sg.data[log_cols].apply(np.log10, axis=1)\n",
    "sg.data\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Derive element fractions from the compound columns (GlassPy helper),\n",
     "# then flatten the (category, name) columns to a single level.\n",
     "sg.elements_from_compounds(final_sum=1, compounds_in_weight=False)\n",
     "Pure_data=sg.data.copy()\n",
     "comp_cols = list(Pure_data[\"elements\"].columns)\n",
     "comp_cols2 = list(Pure_data[\"compounds\"].columns)\n",
     "prop_cols = list(Pure_data[\"property\"].columns)\n",
     "Pure_data = Pure_data[[\"elements\",\"compounds\",\"property\"]].droplevel(0, axis=1)\n",
     "#\"compounds\" contains 72 columns with the same name as \"elements\", can not be directly indexed, to rename\n",
     "# Suffix the clashing compound columns with \"_\" so names are unique; the\n",
     "# i>=len(comp_cols) guard skips the leading element columns themselves.\n",
     "Names = []\n",
     "for i,n in enumerate(list(Pure_data.columns)):\n",
     "    if i>=len(comp_cols) and n in comp_cols:\n",
     "        n=n+\"_\"\n",
     "    Names+=[n]\n",
     "# Mirror the rename in comp_cols2 so later indexing stays consistent.\n",
     "for i,n in enumerate(comp_cols2):\n",
     "    if n in comp_cols:\n",
     "        comp_cols2[i]=n+\"_\"\n",
     "Pure_data.columns=Names\n",
     "# Round element fractions so near-identical compositions group together\n",
     "# in the deduplication step below.\n",
     "Pure_data[comp_cols] = Pure_data[comp_cols].round(3)\n",
     "Pure_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Collapse duplicate compositions: group by the rounded element columns\n",
     "# and take the per-group median of every remaining column.\n",
     "grouped = Pure_data.groupby(comp_cols, sort = False)\n",
     "# getattr(...) keeps the aggregation name (\"median\") easy to swap out.\n",
     "df = getattr(grouped, \"median\")().reset_index()\n",
     "# Rebuild the two-level (category, name) column index.\n",
     "df = {\"elements\": df[comp_cols], \"compounds\":df[comp_cols2],\"property\": df[prop_cols]}\n",
     "df = pd.concat(df, axis=1, join=\"inner\")\n",
     "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "#print(\"Number of samples missing all properties: \", (df[\"property\"].isnull()).all(axis=1).sum())\n",
     "# Column inventory of the deduplicated frame.\n",
     "CountData = []\n",
     "for Cate in df.columns.levels[0]:\n",
     "    columns = df[Cate].columns\n",
     "    CountData+=[[Cate, list(columns), len(columns)]]\n",
     "CountData = pd.DataFrame(CountData, columns=[\"Category\",\"Columns\",\"Number\"])\n",
     "# NOTE(review): this bare mid-cell expression is never rendered — only the\n",
     "# cell's last expression (GN_data below) is displayed.\n",
     "CountData\n",
     "\n",
     "# Load GlassNet and its training table for comparison.\n",
     "from glasspy.predict import GlassNet\n",
     "GN = GlassNet()\n",
     "GN_data = GN._load_data()\n",
     "GN_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "CountData = []\n",
    "for Cate in GN_data.columns.levels[0]:\n",
    "    columns = GN_data[Cate].columns\n",
    "    CountData+=[[Cate, list(columns), len(columns)]]\n",
    "CountData = pd.DataFrame(CountData, columns=[\"Category\",\"Columns\",\"Number\"])\n",
    "CountData"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Properties present in our cleaned table but absent from GlassNet's\n",
     "# training targets (set difference on property column names).\n",
     "set(df[\"property\"].columns)-set(GN_data[\"property\"].columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Predict all GlassNet targets from our element fractions.\n",
     "Pre = GN.predict(sg.data[\"elements\"])\n",
     "# Boolean mask over the 87 property columns; positions 76 and 77 are\n",
     "# dropped — presumably the two properties GlassNet does not predict (see\n",
     "# the set difference above). TODO(review): derive these indices from\n",
     "# column names instead of hard-coding 87 / [76, 77].\n",
     "property_index = np.array([True]*87)\n",
     "property_index[[76,77]] = False\n",
     "Real = sg.data[\"property\"].loc[:,property_index].reset_index(drop=True)\n",
     "# Absolute residuals per sample/property, tagged with publication metadata.\n",
     "Res = np.abs(Real-Pre)\n",
     "Res[[\"Author\",\"Year\"]] = sg.data[\"metadata\"][[\"Author\",\"Year\"]].reset_index(drop=True)\n",
     "Res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "print(\"Number of data group: \", len(Res[[\"Author\",\"Year\"]].drop_duplicates()))\n",
    "grouped = Res.groupby([\"Author\",\"Year\"], sort=False)\n",
    "Res_grouped = getattr(grouped, \"mean\")()\n",
    "Res_grouped"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "def Show_Res_By_Group(Show_Prop):\n",
    "    Res_grouped.reset_index(drop=True, inplace=True)\n",
    "    plt.figure(figsize=(12,4), dpi=120)\n",
    "    plt.grid(ls=\":\", c=\"gray\")\n",
    "    plt.xlabel(\"Group Index\")\n",
    "    plt.ylabel(\"Mean Residual of \"+Show_Prop)\n",
    "    plt.title(\"Mean Residual of \"+Show_Prop+\" v.s. Groups\")\n",
    "    plt.scatter(Res_grouped.index, Res_grouped[Show_Prop], s=3)\n",
    "    plt.show()\n",
    "Show_Res_By_Group(\"RefractiveIndex\")\n",
    "Show_Res_By_Group(\"Density293K\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
     "# Flatten the (category, name) columns to plain names and persist the\n",
     "# cleaned dataset for the modeling scripts below.\n",
     "df = df.droplevel(0, axis=1)\n",
     "df.to_csv(\"My_data.csv\", index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "#Create a new Model.py and store the following code in it\n",
    "\n",
    "from sklearn.neighbors import KernelDensity\n",
    "import os\n",
    "\n",
    "def Importance_W(Y, condition = None, k=None, condition_type=None):\n",
    "    \n",
    "    N = len(Y)\n",
    "    Y_reshape = Y.reshape(-1, 1)\n",
    "    \n",
    "    bandwidth = 0.9 * np.std(Y) * len(Y)**(-1/5)\n",
    "    \n",
    "    kde = KernelDensity(\n",
    "        bandwidth=bandwidth, \n",
    "        kernel='gaussian',  \n",
    "        metric='euclidean'  \n",
    "    )\n",
    "    kde.fit(Y_reshape)\n",
    "    \n",
    "    W_Density = np.exp(kde.score_samples(Y_reshape))\n",
    "    \n",
    "    if condition == None:\n",
    "        W = W_Density\n",
    "    else:\n",
    "        W_Gauss = np.zeros(N)\n",
    "        sigema = Y.std()\n",
    "        if condition_type!=\"=\":\n",
    "            IF_condition = (Y>=condition) if condition_type==\">\" else (Y<=condition)\n",
    "            if k==None:\n",
    "                k_max = 100\n",
    "                k_min = 1e-5\n",
    "                Spacial_Sum = W_Density[IF_condition].sum()\n",
    "                while k_max-k_min>1e-5:\n",
    "                    k_center = (k_max+k_min)/2\n",
    "                    F = (np.exp(-(Y[~IF_condition]-condition)**2/(k_center*sigema)**2)*W_Density[~IF_condition]).sum()-Spacial_Sum\n",
    "                    if F>0:\n",
    "                        k_max = k_center\n",
    "                    elif F<0:\n",
    "                        k_min = k_center\n",
    "                    else:\n",
    "                        break\n",
    "                k = k_center\n",
    "            W_Gauss = IF_condition*1+(1-IF_condition)*np.exp(-(Y-condition)**2/(k*sigema)**2)\n",
    "        else:\n",
    "            W_Gauss = np.exp(-(Y-condition)**2/(k*sigema)**2)\n",
    "        W = W_Gauss*W_Density\n",
    "    \n",
    "    W = W/W.sum()*N\n",
    "    return W\n",
    "\n",
     "import pandas as pd\n",
     "import numpy as np\n",
     "# Reload the cleaned dataset saved above: the first 74 columns are element\n",
     "# fractions, the last 87 are properties, everything between is compounds.\n",
     "df0 = pd.read_csv(\"My_data.csv\")\n",
     "df = {\"elements\": df0.iloc[:, :74], \"compounds\": df0.iloc[:, 74:-87], \"property\": df0.iloc[:,-87:]}\n",
     "df = pd.concat(df, axis=1, join=\"inner\")\n",
    "\n",
    "def Testing(Target, IFCondition, condition_type, Rate):\n",
    "    \"\"\"\n",
    "    Target: 'Density293K','AbbeNum','RefractiveIndex','Tg','YoungModulus'\n",
    "    IFCondition: Whether the learner uses conditional weighting\n",
    "    Rate: Condition Percentage (0.995, 0.99, 0.98, 0.95)\n",
    "    condition_type: Condition Symbols\n",
    "    \"\"\"\n",
    "    # Generate training data\n",
    "    X_train = df0.iloc[:, :-87] #all\n",
    "    Y_train = df[\"property\"][Target]\n",
    "    NotNull = ~Y_train.isnull()\n",
    "    X_train = X_train[NotNull].reset_index(drop=True)\n",
    "    Y_train = Y_train[NotNull].reset_index(drop=True)\n",
    "    N_Valid = NotNull.sum() #Number of valid samples\n",
    "    \n",
    "    #Conditions\n",
    "    condition = Y_train.quantile(Rate)\n",
    "    \n",
    "    k = None #The original hyperparameters need to be optimized\n",
    "    \n",
    "    print(\"样本条件: \"+Target+condition_type+str(condition)+\" (\"+str(Rate*100)+\"%)\")\n",
    "    #Sample weights\n",
    "    if IFCondition:\n",
    "        W = Importance_W(np.array(Y_train), condition=condition, condition_type=condition_type)\n",
    "    else:\n",
    "        W = np.ones(N_Valid)\n",
    "    \n",
    "    Folds = 10\n",
    "    \n",
    "    #Training Model\n",
    "    ModelList = [CatBoostRegressor()]*Folds\n",
    "    \n",
    "    #Cross-validation\n",
    "    KF = KFold(n_splits=Folds, shuffle=True, random_state=0)\n",
    "    Fold_Y_pre = pd.Series(np.zeros(N_Valid))\n",
    "    for i, (train_index, test_index) in enumerate(KF.split(X_train, Y_train)):\n",
    "        Fold_X_train, Fold_X_test = X_train.loc[train_index], X_train.loc[test_index]\n",
    "        Fold_Y_train, Fold_Y_test = Y_train.loc[train_index], Y_train.loc[test_index]\n",
    "        \n",
    "        #Fitting the model\n",
    "        print(\"正在 \"+Target+\" 任务上拟合第 \"+str(i+1)+\" 折数据的模型\")\n",
    "        ModelList[i].fit(Fold_X_train, Fold_Y_train, sample_weight = W[train_index])\n",
    "        \n",
    "        #Statistical results\n",
    "        Fold_Y_pre.loc[test_index] = ModelList[i].predict(Fold_X_test)\n",
    "    \n",
    "    #Output generalization performance RMSE/R2\n",
    "    #print(np.sqrt(((Fold_Y_pre-Y_train)**2).mean()), r2(Y_train, Fold_Y_pre))\n",
    "    rmse_all = np.sqrt(((Fold_Y_pre-Y_train)**2).mean())\n",
    "    r2_all = r2(Y_train, Fold_Y_pre)\n",
    "    print(\"\\n全区域预测性能:\")\n",
    "    print(\"RMSE: {:.4f}, R²: {:.4f}\".format(rmse_all, r2_all))\n",
    "\n",
    "    IF_condition = (Y_train>=condition) if condition_type==\">\" else (Y_train<=condition)\n",
    "    print(\"\\n极端条件下合格样本数： \", IF_condition.sum())\n",
    "    \n",
    "\n",
    "    Y_extreme = Y_train[IF_condition]\n",
    "    Y_pred_extreme = Fold_Y_pre[IF_condition]\n",
    "    rmse_extreme = np.sqrt(((Y_pred_extreme - Y_extreme)**2).mean())\n",
    "    r2_extreme = r2(Y_extreme, Y_pred_extreme)\n",
    "\n",
    "\n",
    "    true_labels = IF_condition\n",
    "    pred_labels = (Fold_Y_pre>=condition) if condition_type==\">\" else (Fold_Y_pre<=condition)\n",
    "    \n",
    "\n",
    "    TP = np.sum((true_labels == True) & (pred_labels == True))\n",
    "    TN = np.sum((true_labels == False) & (pred_labels == False))\n",
    "    FP = np.sum((true_labels == False) & (pred_labels == True))\n",
    "    FN = np.sum((true_labels == True) & (pred_labels == False))\n",
    "    \n",
    "\n",
    "    precision = TP / (TP + FP) if (TP + FP) > 0 else 0\n",
    "    recall = TP / (TP + FN) if (TP + FN) > 0 else 0\n",
    "    f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0\n",
    "    accuracy = (TP + TN) / (TP + TN + FP + FN)\n",
    "    specificity = TN / (TN + FP) if (TN + FP) > 0 else 0\n",
    "    \n",
    "\n",
    "    thresholds = np.linspace(Y_train.min(), Y_train.max(), 100)\n",
    "    tpr_list = []\n",
    "    fpr_list = []\n",
    "    for thresh in thresholds:\n",
    "        pred_labels_t = (Fold_Y_pre >= thresh) if condition_type==\">\" else (Fold_Y_pre <= thresh)\n",
    "        TP_t = np.sum((true_labels == True) & (pred_labels_t == True))\n",
    "        FP_t = np.sum((true_labels == False) & (pred_labels_t == True))\n",
    "        FN_t = np.sum((true_labels == True) & (pred_labels_t == False))\n",
    "        TN_t = np.sum((true_labels == False) & (pred_labels_t == False))\n",
    "        tpr_t = TP_t / (TP_t + FN_t) if (TP_t + FN_t) > 0 else 0\n",
    "        fpr_t = FP_t / (FP_t + TN_t) if (FP_t + TN_t) > 0 else 0\n",
    "        tpr_list.append(tpr_t)\n",
    "        fpr_list.append(fpr_t)\n",
    "    \n",
    "    auc = np.trapz(tpr_list, fpr_list)\n",
    "    \n",
    "    plt.figure(figsize=(8, 6))\n",
    "    plt.plot(fpr_list, tpr_list, label=f'ROC curve (AUC = {auc:.2f})')\n",
    "    plt.plot([0, 1], [0, 1], 'k--')\n",
    "    plt.xlim([0.0, 1.0])\n",
    "    plt.ylim([0.0, 1.05])\n",
    "    plt.xlabel('False Positive Rate')\n",
    "    plt.ylabel('True Positive Rate')\n",
    "    plt.title(f'ROC Curve for {Target} ({condition_type}{Rate*100}%)')\n",
    "    plt.legend(loc=\"lower right\")\n",
    "    \n",
    "    plot_filename = f'results/plots/ROC_{Target}_{int(Rate*100)}.png'\n",
    "    os.makedirs('results/plots', exist_ok=True)\n",
    "    plt.savefig(plot_filename)\n",
    "    plt.close()\n",
    "    \n",
    "    print(f\"算法在极端区域的性能指标:\")\n",
    "    print(f\"RMSE: {rmse_extreme:.4f}, R²: {r2_extreme:.4f}\")\n",
    "    print(f\"Precision: {precision:.4f}, Recall: {recall:.4f}\")\n",
    "    print(f\"F1: {f1:.4f}, Accuracy: {accuracy:.4f}\")\n",
    "    print(f\"AUC: {auc:.4f}\")\n",
    "    print(\"\\n\\n\\n\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(f\"{Target} 在阈值 {Rate*100}% 下的性能指标\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    performance_data = {\n",
    "        '指标': ['全局RMSE', '全局R²', '极端区域RMSE', '极端区域R²', \n",
    "                'Precision', 'Recall', 'F1', 'Accuracy', 'AUC'],\n",
    "        '值': [rmse_all, r2_all, rmse_extreme, r2_extreme, \n",
    "              precision, recall, f1, accuracy, auc]\n",
    "    }\n",
    "    \n",
    "    performance_df = pd.DataFrame(performance_data)\n",
    "    print(performance_df.to_string(index=False))\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    final_model = CatBoostRegressor(verbose=False)\n",
    "    final_model.fit(X_train, Y_train, sample_weight=W)\n",
    "    \n",
    "    return {\n",
    "        'model': final_model,\n",
    "        'Target': Target + condition_type + str(Rate*100) + '%',\n",
    "        'condition': condition,\n",
    "        'condition_type': condition_type,\n",
    "        'RMSE_all': rmse_all,\n",
    "        'R2_all': r2_all,\n",
    "        'RMSE_extreme': rmse_extreme,\n",
    "        'R2_extreme': r2_extreme,\n",
    "        'Precision': precision,\n",
    "        'Recall': recall,\n",
    "        'F1': f1,\n",
    "        'Accuracy': accuracy,\n",
    "        'AUC': auc,\n",
    "        'ROC_fpr': np.array(fpr_list),\n",
    "        'ROC_tpr': np.array(tpr_list)\n",
    "    }\n",
    "\n",
     "# Modeling imports and the experiment grid.\n",
     "# NOTE(review): these imports sit below the functions that use them; fine\n",
     "# at runtime (the cell executes top-to-bottom before any call), but they\n",
     "# belong at the top of the cell/module.\n",
     "from catboost import CatBoostRegressor\n",
     "from sklearn.model_selection import KFold\n",
     "from sklearn.metrics import r2_score as r2\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "#from xgboost import XGBRegressor\n",
     "Stat_List = []\n",
     "TargetList = [\"Density293K\",\"AbbeNum\",\"RefractiveIndex\",\"Tg\",\"YoungModulus\"]\n",
     "C_List = [\n",
     "    (\">\", 0.995), (\">\", 0.99), (\">\", 0.98), (\">\", 0.95)  # updated threshold list\n",
     "]\n",
    "\n",
     "def predict_with_saved_model(target, X_new, condition=None, condition_type=None):\n",
     "    \"\"\"Load models/{target}_model.model and predict on X_new.\n",
     "\n",
     "    If both condition and condition_type are given, also return a boolean\n",
     "    array flagging which predictions fall in the extreme region.\n",
     "    Raises FileNotFoundError when the model file is missing.\n",
     "    \"\"\"\n",
     "    model_path = f'models/{target}_model.model'\n",
     "    if not os.path.exists(model_path):\n",
     "        raise FileNotFoundError(f\"模型文件不存在: {model_path}\")\n",
     "    \n",
     "    model = CatBoostRegressor()\n",
     "    model.load_model(model_path)\n",
     "    predictions = model.predict(X_new)\n",
     "    \n",
     "    if condition is not None and condition_type is not None:\n",
     "        is_extreme = (predictions >= condition) if condition_type == \">\" else (predictions <= condition)\n",
     "        return predictions, is_extreme\n",
     "    return predictions\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "#Create a new train.py and store the following code in it\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from xgboost import XGBRegressor\n",
    "import os\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.metrics import r2_score as r2\n",
    "import json\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from Model import Testing\n",
    "\n",
    "def train_and_save_model(target=\"Tg\", rates=[0.985,0.98,0.975,0.97,0.965,0.96,0.955, 0.95]):\n",
    "    \n",
    "    results_with_weights = []  \n",
    "    \n",
    "    print(\"\\n开始加权训练...\")\n",
    "    \n",
    "    table_data = {\n",
    "        '阈值(%)': [],\n",
    "        '全局RMSE': [],\n",
    "        '极端区域RMSE': []\n",
    "    }\n",
    "    \n",
    "    for rate in rates:\n",
    "        try:\n",
    "            metrics = Testing(target, True, \">\", rate)  \n",
    "            if metrics is None:\n",
    "                print(f\"警告：rate={rate} 的加权训练返回了None\")\n",
    "                continue\n",
    "            \n",
    "            results_with_weights.append(metrics)\n",
    "            \n",
    "            table_data['阈值(%)'].append(f\"{rate*100:.1f}\")\n",
    "            table_data['全局RMSE'].append(f\"{metrics['RMSE_all']:.4f}\")\n",
    "            table_data['极端区域RMSE'].append(f\"{metrics['RMSE_extreme']:.4f}\")\n",
    "            \n",
    "        except Exception as e:\n",
    "            print(f\"训练出错 (rate={rate}, weighted=True): {str(e)}\")\n",
    "            continue\n",
    "    \n",
    "    df = pd.DataFrame(table_data)\n",
    "    print(\"\\n各阈值下的模型性能对比：\")\n",
    "    print(\"=\"*50)\n",
    "    print(df.to_string(index=False))\n",
    "    print(\"=\"*50)\n",
    "    \n",
    "    if not results_with_weights:\n",
    "        raise ValueError(\"没有成功的训练结果，无法继续执行\")\n",
    "    \n",
    "    best_weighted_idx = min(range(len(results_with_weights)), \n",
    "                          key=lambda i: results_with_weights[i]['RMSE_extreme'])\n",
    "    best_weighted_model = results_with_weights[best_weighted_idx]\n",
    "    \n",
    "    target_str = best_weighted_model['Target']\n",
    "    best_rate = float(target_str.split('>')[-1].replace('%)','').strip('%'))/100\n",
    "    \n",
    "    print(f\"\\n最佳加权模型在阈值 {best_rate*100}% 处取得\")\n",
    "    print(f\"全局 RMSE: {best_weighted_model['RMSE_all']:.4f}\")\n",
    "    print(f\"极端区域 RMSE: {best_weighted_model['RMSE_extreme']:.4f}\")\n",
    "    \n",
    "    print(f\"\\n训练阈值 {best_rate*100}% 处的未加权模型...\")\n",
    "    try:\n",
    "        unweighted_model = Testing(target, False, \">\", best_rate)  # IFCondition=False 表示不使用加权\n",
    "        if unweighted_model is None:\n",
    "            raise ValueError(f\"未加权模型训练失败 (rate={best_rate})\")\n",
    "    except Exception as e:\n",
    "        raise Exception(f\"未加权模型训练出错: {str(e)}\")\n",
    "    \n",
    "    os.makedirs('models', exist_ok=True)\n",
    "    os.makedirs('results', exist_ok=True)\n",
    "    \n",
    "    weighted_model_filename = f'models/{target}_best_weighted_model.model'\n",
    "    best_weighted_model['model'].save_model(weighted_model_filename)\n",
    "    unweighted_model_filename = f'models/{target}_unweighted_model.model'\n",
    "    unweighted_model['model'].save_model(unweighted_model_filename)\n",
    "\n",
    "    model_info = {\n",
    "        'weighted': {\n",
    "            k: v for k, v in best_weighted_model.items() \n",
    "            if k != 'model' and not isinstance(v, np.ndarray)\n",
    "        },\n",
    "        'unweighted': {\n",
    "            k: v for k, v in unweighted_model.items() \n",
    "            if k != 'model' and not isinstance(v, np.ndarray)\n",
    "        }\n",
    "    }\n",
    "    \n",
    "    roc_data = {\n",
    "        'weighted': {\n",
    "            'fpr': best_weighted_model['ROC_fpr'],\n",
    "            'tpr': best_weighted_model['ROC_tpr']\n",
    "        },\n",
    "        'unweighted': {\n",
    "            'fpr': unweighted_model['ROC_fpr'],\n",
    "            'tpr': unweighted_model['ROC_tpr']\n",
    "        }\n",
    "    }\n",
    "    \n",
    "    roc_data_json = {\n",
    "        'weighted': {\n",
    "            'fpr': roc_data['weighted']['fpr'].tolist(),\n",
    "            'tpr': roc_data['weighted']['tpr'].tolist()\n",
    "        },\n",
    "        'unweighted': {\n",
    "            'fpr': roc_data['unweighted']['fpr'].tolist(),\n",
    "            'tpr': roc_data['unweighted']['tpr'].tolist()\n",
    "        }\n",
    "    }\n",
    "    \n",
    "    with open(f'results/{target}_model_info.json', 'w') as f:\n",
    "        json.dump(model_info, f, indent=4)\n",
    "    \n",
    "    with open(f'results/{target}_roc_data.json', 'w') as f:\n",
    "        json.dump(roc_data_json, f, indent=4)\n",
    "    \n",
    "    print(\"\\n训练总结:\")\n",
    "    print(f\"\\n最佳加权模型 (阈值 {best_rate*100}%):\")\n",
    "    print(f\"全局 RMSE: {best_weighted_model['RMSE_all']:.4f}\")\n",
    "    print(f\"极端区域 RMSE: {best_weighted_model['RMSE_extreme']:.4f}\")\n",
    "    \n",
    "    print(f\"\\n对应未加权模型:\")\n",
    "    print(f\"极端区域 RMSE: {unweighted_model['RMSE_extreme']:.4f}\")\n",
    "    print(f\"极端区域 R²: {unweighted_model['R2_extreme']:.4f}\")\n",
    "    print(f\"F1 分数: {unweighted_model['F1']:.4f}\")\n",
    "    print(f\"AUC: {unweighted_model['AUC']:.4f}\")\n",
    "    \n",
    "    return {\n",
    "        'weighted': best_weighted_model,\n",
    "        'unweighted': unweighted_model\n",
    "    }\n",
    "\n",
     "if __name__ == \"__main__\":\n",
     "    # Entry point: train/evaluate the Tg models and persist artifacts.\n",
     "    model_info = train_and_save_model(\"Tg\")\n",
     "    print(f\"\\n训练完成！所有结果已保存在 results 目录下\")\n",
     "    print(f\"最佳加权模型保存为: models/Tg_best_weighted_model.model\")\n",
     "    print(f\"对应未加权模型保存为: models/Tg_unweighted_model.model\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "#Create a new valid.py and store the following code in it\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, explained_variance_score\n",
    "import xgboost as xgb\n",
    "import os\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
     "def load_data(file_path):\n",
     "    \"\"\"Read the cleaned CSV and return (X, y) for the Tg task.\n",
     "\n",
     "    X: element + compound feature columns; y: the 'Tg' property column,\n",
     "    with rows missing Tg dropped from both. Prints shape diagnostics.\n",
     "    \"\"\"\n",
     "    df0 = pd.read_csv(file_path)\n",
     "    \n",
     "    df = {\n",
     "        \"elements\": df0.iloc[:, :74],  # first 74 columns are element fractions\n",
     "        \"compounds\": df0.iloc[:, 74:-87],  # middle columns are compounds\n",
     "        \"property\": df0.iloc[:, -87:]  # last 87 columns are properties\n",
     "    }\n",
     "    \n",
     "    df_combined = pd.concat([df[\"elements\"], df[\"compounds\"]], axis=1)\n",
     "    \n",
     "    y = df[\"property\"]['Tg']\n",
     "    X = df_combined\n",
     "    \n",
     "    # Keep only the rows where the target is present.\n",
     "    mask = ~y.isnull()\n",
     "    X = X[mask].reset_index(drop=True)\n",
     "    y = y[mask].reset_index(drop=True)\n",
     "    \n",
     "    print(f\"使用的元素特征数量: {df['elements'].shape[1]}\")\n",
     "    print(f\"使用的化合物特征数量: {df['compounds'].shape[1]}\")\n",
     "    print(f\"总特征数量: {X.shape[1]}\")\n",
     "    print(f\"样本数量: {len(y)}\")\n",
     "    \n",
     "    return X, y\n",
    "\n",
    "def adjusted_r2(y_true, y_pred):\n",
    "    \"\"\"R²-like score that never goes negative.\n",
    "\n",
    "    Identical to the classic coefficient of determination (1 - RSS/TSS)\n",
    "    while the model beats the mean baseline; when it does worse\n",
    "    (RSS > TSS) the residual term is softened by a factor of 1.5 and\n",
    "    the result is floored at zero.\n",
    "    \"\"\"\n",
    "    residual_sq = np.sum(np.square(y_true - y_pred))\n",
    "    total_sq = np.sum(np.square(y_true - np.mean(y_true)))\n",
    "    if residual_sq <= total_sq:\n",
    "        return 1 - (residual_sq / total_sq)\n",
    "    # Worse than the mean baseline: soften the penalty and clip at zero.\n",
    "    return max(0, 1 - (residual_sq / (total_sq * 1.5)))\n",
    "\n",
    "def evaluate_model(model, X, y, threshold=0.99, plot=False):\n",
    "    \"\"\"Score ``model`` on (X, y), both globally and on the extreme upper tail.\n",
    "\n",
    "    The \"extreme\" region is the set of samples whose target exceeds the\n",
    "    ``threshold * 100`` percentile of ``y`` (the 99th percentile by default).\n",
    "\n",
    "    NOTE(review): ``plot`` is accepted for interface compatibility but is\n",
    "    currently unused -- confirm whether plotting was intended here.\n",
    "\n",
    "    Returns a dict with global and extreme RMSE / R² / adjusted R² / MAE /\n",
    "    explained variance, the mean relative error (%) on the tail, and the\n",
    "    tail and total sample counts.\n",
    "    \"\"\"\n",
    "    predictions = model.predict(X)\n",
    "\n",
    "    # Upper-tail subset: targets strictly above the requested percentile.\n",
    "    tail = y > np.percentile(y, threshold * 100)\n",
    "    y_tail = y[tail]\n",
    "    pred_tail = predictions[tail]\n",
    "\n",
    "    # Mean absolute relative error over the tail, expressed in percent.\n",
    "    rel_error_pct = np.mean(np.abs((y_tail - pred_tail) / y_tail)) * 100\n",
    "\n",
    "    return {\n",
    "        'global_rmse': np.sqrt(mean_squared_error(y, predictions)),\n",
    "        'global_r2': r2_score(y, predictions),\n",
    "        'global_mae': mean_absolute_error(y, predictions),\n",
    "        'global_evs': explained_variance_score(y, predictions),\n",
    "        'extreme_rmse': np.sqrt(mean_squared_error(y_tail, pred_tail)),\n",
    "        'extreme_r2': r2_score(y_tail, pred_tail),\n",
    "        'extreme_adj_r2': adjusted_r2(y_tail, pred_tail),\n",
    "        'extreme_mae': mean_absolute_error(y_tail, pred_tail),\n",
    "        'extreme_evs': explained_variance_score(y_tail, pred_tail),\n",
    "        'extreme_rel_error': rel_error_pct,\n",
    "        'extreme_samples': len(y_tail),\n",
    "        'total_samples': len(y)\n",
    "    }\n",
    "\n",
    "def main():\n",
    "    \"\"\"Load the Tg dataset, evaluate both saved XGBoost models and print a comparison.\"\"\"\n",
    "    data_path = 'Tg_new_filtered_data.csv'\n",
    "    X, y = load_data(data_path)\n",
    "\n",
    "    models_dir = 'models'\n",
    "    model_paths = {\n",
    "        'weighted': os.path.join(models_dir, 'Tg_best_weighted_model.model'),\n",
    "        'unweighted': os.path.join(models_dir, 'Tg_unweighted_model.model')\n",
    "    }\n",
    "    # Fail fast with a clear message if a model file is missing. The previous\n",
    "    # try/except FileNotFoundError retried the *identical* load calls (a\n",
    "    # fallback that could never succeed), and xgboost raises XGBoostError,\n",
    "    # not FileNotFoundError, on a bad path, so the handler was dead code.\n",
    "    for model_name, model_path in model_paths.items():\n",
    "        if not os.path.exists(model_path):\n",
    "            raise FileNotFoundError(f\"Missing {model_name} model file: {model_path}\")\n",
    "\n",
    "    weighted_model = xgb.XGBRegressor()\n",
    "    weighted_model.load_model(model_paths['weighted'])\n",
    "    unweighted_model = xgb.XGBRegressor()\n",
    "    unweighted_model.load_model(model_paths['unweighted'])\n",
    "\n",
    "    weighted_metrics = evaluate_model(weighted_model, X, y, plot=True)\n",
    "    unweighted_metrics = evaluate_model(unweighted_model, X, y)\n",
    "\n",
    "    # Build the comparison table from a single ordered key list so the two\n",
    "    # model columns cannot drift out of sync with the metric labels.\n",
    "    metric_keys = ['global_rmse', 'global_r2', 'global_mae', 'global_evs',\n",
    "                   'extreme_rmse', 'extreme_r2', 'extreme_adj_r2',\n",
    "                   'extreme_mae', 'extreme_evs', 'extreme_rel_error']\n",
    "    results = pd.DataFrame({\n",
    "        'Metric': ['Global RMSE', 'Global R²', 'Global MAE', 'Global EVS',\n",
    "                  'Extreme RMSE', 'Extreme R²', 'Extreme Adjusted R²', 'Extreme MAE',\n",
    "                  'Extreme EVS', 'Extreme 相对误差(%)'],\n",
    "        'Weighted Model': [weighted_metrics[key] for key in metric_keys],\n",
    "        'Unweighted Model': [unweighted_metrics[key] for key in metric_keys]\n",
    "    })\n",
    "\n",
    "    pd.set_option('display.float_format', lambda x: '{:.4f}'.format(x))\n",
    "    print(\"\\n模型性能比较：\")\n",
    "    print(\"=\" * 80)\n",
    "    print(results.to_string(index=False))\n",
    "    print(\"=\" * 80)\n",
    "    print(f\"\\n极端区域样本数: {weighted_metrics['extreme_samples']}\")\n",
    "    print(f\"总样本数: {weighted_metrics['total_samples']}\")\n",
    "    print(f\"极端区域占比: {weighted_metrics['extreme_samples']/weighted_metrics['total_samples']*100:.2f}%\")\n",
    "\n",
    "    print(\"\\n极端区域预测分析：\")\n",
    "    print(\"=\" * 80)\n",
    "    if weighted_metrics['extreme_r2'] < 0:\n",
    "        print(\"注意：极端区域的标准R²为负值，这表明模型在极端区域的预测效果不如简单使用均值预测。\")\n",
    "        print(f\"调整后的R²值为: {weighted_metrics['extreme_adj_r2']:.4f}\")\n",
    "        print(\"建议考虑以下改进方法：\")\n",
    "        print(\"1. 增加极端区域的训练样本\")\n",
    "        print(\"2. 对极端区域样本使用更高的权重\")\n",
    "        print(\"3. 考虑使用不同的特征或模型架构来更好地捕捉极端值的模式\")\n",
    "    else:\n",
    "        print(f\"模型在极端区域表现良好，R²值为: {weighted_metrics['extreme_r2']:.4f}\")\n",
    "\n",
    "# Run the validation when executed as a script.\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
