{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### Concepts\n",
    "#### Cross-Validation:\n",
    "- K-Fold Cross-Validation\n",
    "- Leave-One-Out Cross-Validation(LOOCV)\n",
    "- Stratified K-Fold Cross-Validation\n",
    "\n",
    "#### Statistical Evaluation Metrics\n",
    "##### Common Metrics:\n",
    "- Accuracy\n",
    "- Precision\n",
    "- Recall(Sensitivity)\n",
    "- F1 Score\n",
    "- ROC-AUC"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Remove Target"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Original DataFrame\n",
      "   ID              Name   Age  ...    JoinDate PerformanceScore Target\n",
      "0   1          John Doe  28.0  ...  2020-01-15        Excellent      1\n",
      "1   2        Jane Smith  34.0  ...  2019-03-22             Good      0\n",
      "2   3       Bob Johnson  45.0  ...  2018-07-30          Average      1\n",
      "3   4    Alice Williams  29.0  ...  2021-05-18        Excellent      0\n",
      "4   5       Chris Evans   NaN  ...  2017-11-01             Good      1\n",
      "5   6    Patricia Brown  38.0  ...  2016-09-12             Good      0\n",
      "6   7     Michael Davis  50.0  ...         NaN        Excellent      1\n",
      "7   8    Linda Martinez  27.0  ...  2020-02-25          Average      0\n",
      "8   9      James Wilson  31.0  ...  2019-12-05             Good      1\n",
      "9  10  Barbara Anderson  40.0  ...  2018-04-10        Excellent      0\n",
      "\n",
      "[10 rows x 8 columns]\n",
      "\n",
      "DataFrame after dropping 'Target' column:\n",
      "   ID              Name   Age  ...   Department    JoinDate PerformanceScore\n",
      "0   1          John Doe  28.0  ...        Sales  2020-01-15        Excellent\n",
      "1   2        Jane Smith  34.0  ...  Engineering  2019-03-22             Good\n",
      "2   3       Bob Johnson  45.0  ...           HR  2018-07-30          Average\n",
      "3   4    Alice Williams  29.0  ...        Sales  2021-05-18        Excellent\n",
      "4   5       Chris Evans   NaN  ...  Engineering  2017-11-01             Good\n",
      "5   6    Patricia Brown  38.0  ...           HR  2016-09-12             Good\n",
      "6   7     Michael Davis  50.0  ...        Sales         NaN        Excellent\n",
      "7   8    Linda Martinez  27.0  ...  Engineering  2020-02-25          Average\n",
      "8   9      James Wilson  31.0  ...           HR  2019-12-05             Good\n",
      "9  10  Barbara Anderson  40.0  ...        Sales  2018-04-10        Excellent\n",
      "\n",
      "[10 rows x 7 columns]\n",
      "\n",
      "Original DataFrame remains unchanged:\n",
      "   ID              Name   Age  ...    JoinDate PerformanceScore Target\n",
      "0   1          John Doe  28.0  ...  2020-01-15        Excellent      1\n",
      "1   2        Jane Smith  34.0  ...  2019-03-22             Good      0\n",
      "2   3       Bob Johnson  45.0  ...  2018-07-30          Average      1\n",
      "3   4    Alice Williams  29.0  ...  2021-05-18        Excellent      0\n",
      "4   5       Chris Evans   NaN  ...  2017-11-01             Good      1\n",
      "5   6    Patricia Brown  38.0  ...  2016-09-12             Good      0\n",
      "6   7     Michael Davis  50.0  ...         NaN        Excellent      1\n",
      "7   8    Linda Martinez  27.0  ...  2020-02-25          Average      0\n",
      "8   9      James Wilson  31.0  ...  2019-12-05             Good      1\n",
      "9  10  Barbara Anderson  40.0  ...  2018-04-10        Excellent      0\n",
      "\n",
      "[10 rows x 8 columns]\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split,cross_val_score,KFold\n",
    "from sklearn.preprocessing import StandardScaler,LabelEncoder\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,f1_score,roc_auc_score\n",
    "\n",
    "\n",
    "# Load data\n",
    "df = pd.read_csv('data.csv')\n",
    "\n",
    "print(\"Original DataFrame\")\n",
    "print(df)\n",
    "\n",
    "# Drop the 'Target' column\n",
    "df_features = df.drop('Target',axis=1)\n",
    "\n",
    "print(\"\\nDataFrame after dropping 'Target' column:\")\n",
    "print(df_features)\n",
    "\n",
    "print(\"\\nOriginal DataFrame remains unchanged:\")\n",
    "print(df)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Practice"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           ID                 Name  Age  Salary   Department    JoinDate  \\\n",
      "0           1  Elizabeth Rodriguez   30   41158    Marketing  2022-11-02   \n",
      "1           2        James Calhoun   45   65783           HR  2020-02-03   \n",
      "2           3      Alexandra Moore   30   32787  Engineering  2020-05-14   \n",
      "3           4           Bruce Hunt   43   77059        Sales  2022-12-09   \n",
      "4           5        Andrew Willis   29   73470  Engineering  2023-04-14   \n",
      "...       ...                  ...  ...     ...          ...         ...   \n",
      "99995   99996      Tiffany Mcbride   28   56615           HR  2024-10-05   \n",
      "99996   99997       Angelica White   23   49051  Engineering  2022-02-21   \n",
      "99997   99998        Shane Burnett   39   55388        Sales  2023-09-25   \n",
      "99998   99999         Kathleen Lee   24   44912           HR  2021-09-07   \n",
      "99999  100000        David Barrett   22   65505           HR  2023-10-29   \n",
      "\n",
      "      PerformanceScore  Target  \n",
      "0            Excellent       0  \n",
      "1                 Poor       1  \n",
      "2                 Good       0  \n",
      "3                 Poor       0  \n",
      "4              Average       0  \n",
      "...                ...     ...  \n",
      "99995             Good       1  \n",
      "99996             Poor       1  \n",
      "99997        Excellent       1  \n",
      "99998        Excellent       0  \n",
      "99999             Poor       1  \n",
      "\n",
      "[100000 rows x 8 columns]\n",
      "\n",
      "Processed DataFrame:\n",
      "           ID   Name  Age  Salary  Department  JoinDate  PerformanceScore  \\\n",
      "0           1  22251   30   41158           2      1017                 1   \n",
      "1           2  27914   45   65783           1        14                 3   \n",
      "2           3   1306   30   32787           0       115                 2   \n",
      "3           4   9912   43   77059           3      1054                 3   \n",
      "4           5   4020   29   73470           0      1180                 0   \n",
      "...       ...    ...  ...     ...         ...       ...               ...   \n",
      "99995   99996  66627   28   56615           1      1720                 2   \n",
      "99996   99997   4522   23   49051           0       763                 3   \n",
      "99997   99998  61265   39   55388           3      1344                 1   \n",
      "99998   99999  37499   24   44912           1       596                 1   \n",
      "99999  100000  17529   22   65505           1      1378                 3   \n",
      "\n",
      "       Target  \n",
      "0           0  \n",
      "1           1  \n",
      "2           0  \n",
      "3           0  \n",
      "4           0  \n",
      "...       ...  \n",
      "99995       1  \n",
      "99996       1  \n",
      "99997       1  \n",
      "99998       0  \n",
      "99999       1  \n",
      "\n",
      "[100000 rows x 8 columns]\n",
      "\n",
      "Scaled DataFrame:\n",
      "             ID   Name       Age    Salary  Department  JoinDate  \\\n",
      "0     -1.732033  22251 -0.822832 -1.183624           2      1017   \n",
      "1     -1.731999  27914  0.476320  0.036478           1        14   \n",
      "2     -1.731964   1306 -0.822832 -1.598385           0       115   \n",
      "3     -1.731930   9912  0.303100  0.595173           3      1054   \n",
      "4     -1.731895   4020 -0.909442  0.417348           0      1180   \n",
      "...         ...    ...       ...       ...         ...       ...   \n",
      "99995  1.731895  66627 -0.996052 -0.417772           1      1720   \n",
      "99996  1.731930   4522 -1.429103 -0.792547           0       763   \n",
      "99997  1.731964  61265 -0.043341 -0.478566           3      1344   \n",
      "99998  1.731999  37499 -1.342493 -0.997624           1       596   \n",
      "99999  1.732033  17529 -1.515713  0.022704           1      1378   \n",
      "\n",
      "       PerformanceScore  Target  \n",
      "0                     1       0  \n",
      "1                     3       1  \n",
      "2                     2       0  \n",
      "3                     3       0  \n",
      "4                     0       0  \n",
      "...                 ...     ...  \n",
      "99995                 2       1  \n",
      "99996                 3       1  \n",
      "99997                 1       1  \n",
      "99998                 1       0  \n",
      "99999                 3       1  \n",
      "\n",
      "[100000 rows x 8 columns]\n",
      "\n",
      "Features (X):\n",
      "         ID   Name       Age    Salary  Department  JoinDate  PerformanceScore\n",
      "0 -1.732033  22251 -0.822832 -1.183624           2      1017                 1\n",
      "1 -1.731999  27914  0.476320  0.036478           1        14                 3\n",
      "2 -1.731964   1306 -0.822832 -1.598385           0       115                 2\n",
      "3 -1.731930   9912  0.303100  0.595173           3      1054                 3\n",
      "4 -1.731895   4020 -0.909442  0.417348           0      1180                 0\n",
      "\n",
      "Target (y):\n",
      "0        0\n",
      "1        1\n",
      "2        0\n",
      "3        0\n",
      "4        0\n",
      "        ..\n",
      "99995    1\n",
      "99996    1\n",
      "99997    1\n",
      "99998    0\n",
      "99999    1\n",
      "Name: Target, Length: 100000, dtype: int64\n",
      "Cross-validation Accuracy: 0.5015600000000001\n",
      "Cross-validation Accuracy (Random Forest): 0.501\n",
      "Accuracy:0.5036\n",
      "Precision:0.5032668011939976\n",
      "Recall:0.5036\n",
      "F1 Score:0.48935063154027414\n",
      "ROC-AUC:0.5042958057780139\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split,cross_val_score,KFold\n",
    "from sklearn.preprocessing import StandardScaler,LabelEncoder\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,f1_score,roc_auc_score\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "\n",
    "#Load data\n",
    "df = pd.read_csv('data.csv')\n",
    "\n",
    "\n",
    "# Inspect the data\n",
    "print(df)\n",
    "\n",
    "\n",
    "# Process Data\n",
    "# Handle missing values\n",
    "numberical_cols= df.select_dtypes(include=['number']).columns\n",
    "numberical_cols = numberical_cols.drop('Target')\n",
    "df[numberical_cols] = df[numberical_cols].fillna(df[numberical_cols].mean())\n",
    "\n",
    "\n",
    "#Encode categorical variables\n",
    "label_encoders = {}\n",
    "for column in df.select_dtypes(include=[object]).columns:\n",
    "   label_encoders[column] = LabelEncoder()\n",
    "   df[column] = label_encoders[column].fit_transform(df[column])\n",
    "\n",
    "print(\"\\nProcessed DataFrame:\")\n",
    "print(df)\n",
    "\n",
    "\n",
    "# Scale numberical features\n",
    "scaler = StandardScaler()\n",
    "\n",
    "df[numberical_cols] = scaler.fit_transform(df[numberical_cols])\n",
    "\n",
    "\n",
    "# Inspect the scaled data\n",
    "print(\"\\nScaled DataFrame:\")\n",
    "print(df)\n",
    "\n",
    "\n",
    "# Split data into features and target\n",
    "X = df.drop('Target',axis=1)\n",
    "y = df['Target']\n",
    "\n",
    "\n",
    "print(\"\\nFeatures (X):\")\n",
    "print(X.head())\n",
    "print(\"\\nTarget (y):\")\n",
    "print(y)\n",
    "\n",
    "\n",
    "#Cross Validation\n",
    "kf=KFold(n_splits=5,shuffle=True,random_state=42)\n",
    "model=LogisticRegression(max_iter=100000)\n",
    "\n",
    "#Evalueate model using cross-validation\n",
    "cv_results = cross_val_score(model,X,y,cv=kf,scoring='accuracy')\n",
    "print(f'Cross-validation Accuracy: {cv_results.mean()}')\n",
    "\n",
    "\n",
    "# Cross-validation with Random Forest\n",
    "model_rf = RandomForestClassifier(random_state=42)\n",
    "cv_results_rf = cross_val_score(model_rf,X,y,cv=kf,scoring='accuracy')\n",
    "print(f'Cross-validation Accuracy (Random Forest): {cv_results_rf.mean()}')\n",
    "\n",
    "\n",
    "\n",
    "# Train and evaluate model\n",
    "X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=42)\n",
    "model.fit(X_train,y_train)\n",
    "y_pred = model.predict(X_test)\n",
    "\n",
    "#Calculate evaluation metrics\n",
    "accuracy=accuracy_score(y_test,y_pred)\n",
    "precision=precision_score(y_test,y_pred,average='weighted')\n",
    "recall=recall_score(y_test,y_pred,average='weighted')\n",
    "f1=f1_score(y_test,y_pred,average='weighted')\n",
    "roc_auc=roc_auc_score(y_test,model.predict_proba(X_test)[:,1])\n",
    "\n",
    "print(f'Accuracy:{accuracy}')\n",
    "print(f'Precision:{precision}')\n",
    "print(f'Recall:{recall}')\n",
    "print(f'F1 Score:{f1}')\n",
    "print(f'ROC-AUC:{roc_auc}')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 100条数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "![100条数据](./screenshot-20250120-144947.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1000条数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![1000](./1000-screenshot-20250120-145608.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 10000条数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![10000](./10000-screenshot-20250120-145930.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 100000条数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![100000](./100000-screenshot-20250120-150511.png)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm-learning",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
